source
stringlengths
3
92
original_c
stringlengths
26
2.25M
no_omp_formatted
stringlengths
0
2.25M
omp_formatted
stringlengths
0
2.25M
GB_unaryop__identity_bool_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_int32 // op(A') function: GB_tran__identity_bool_int32 // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_int32 // op(A') function: GB_tran__identity_bool_int32 // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_int32 // op(A') function: GB_tran__identity_bool_int32 // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
progressbar.h
// The MIT License (MIT) // // Copyright (c) 2019 Luigi Pertoldi // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // ============================================================================ // ___ ___ ___ __ ___ ____ __ __ ___ __ ___ // | |_) | |_) / / \ / /`_ | |_) | |_ ( (` ( (` | |_) / /\ | |_) // |_| |_| \ \_\_/ \_\_/ |_| \ |_|__ _)_) _)_) |_|_) /_/--\ |_| \ // // Very simple progress bar for c++ loops with internal running variable // // Author: Luigi Pertoldi // Created: 3 dic 2016 // // Notes: The bar must be used when there's no other possible source of output // inside the for loop // //# progressbar // //A very simple, header-only, fully customizable, progress bar (with percentage) //for c++ loops. // //Very simple to set up: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... 
the program // } // return 0; //} //``` //![animated gif](.github/example-simple.gif) // //Allows customization: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // bar.set_todo_char(" "); // bar.set_done_char("█"); // bar.set_opening_bracket_char("{"); // bar.set_closing_bracket_char("}"); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... the program // } // return 0; //} //``` //![animated gif](.github/example-custom.gif) // //## Notes // // To use the bar in parallelized loops call `progressbar::update` in a critical // section. With [OpenMP](http://www.openmp.org) this can be achieved with the //following structure: //```cpp //#pragma omp parallel for //for ( ... ) { //#pragma omp critical //bar.update(); //} //``` #ifndef __PROGRESSBAR_HPP #define __PROGRESSBAR_HPP #include <iostream> #include <string> #include <stdexcept> class progressbar { public: // default destructor ~progressbar() = default; // delete everything else progressbar (progressbar const&) = delete; progressbar& operator=(progressbar const&) = delete; progressbar (progressbar&&) = delete; progressbar& operator=(progressbar&&) = delete; // default constructor, must call set_niter later progressbar(); progressbar(int n, bool showbar=true); // reset bar to use it again void reset(); // set number of loop iterations void set_niter(int iter); // chose your style void set_done_char(const std::string& sym) {done_char = sym;} void set_todo_char(const std::string& sym) {todo_char = sym;} void set_opening_bracket_char(const std::string& sym) {opening_bracket_char = sym;} void set_closing_bracket_char(const std::string& sym) {closing_bracket_char = sym;} // to show only the percentage void show_bar(bool flag = true) {do_show_bar = flag;} // main function void update(); private: int progress; int n_cycles; int last_perc; bool do_show_bar; bool update_is_called; std::string done_char; std::string todo_char; std::string opening_bracket_char; std::string 
closing_bracket_char; }; progressbar::progressbar() : progress(0), n_cycles(0), last_perc(0), do_show_bar(true), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} progressbar::progressbar(int n, bool showbar) : progress(0), n_cycles(n), last_perc(0), do_show_bar(showbar), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} void progressbar::reset() { progress = 0, update_is_called = false; last_perc = 0; return; } void progressbar::set_niter(int niter) { if (niter <= 0) throw std::invalid_argument( "progressbar::set_niter: number of iterations null or negative"); n_cycles = niter; return; } void progressbar::update() { if (n_cycles == 0) throw std::runtime_error( "progressbar::update: number of cycles not set"); if (!update_is_called) { if (do_show_bar == true) { std::cout << opening_bracket_char; for (int _ = 0; _ < 50; _++) std::cout << todo_char; std::cout << closing_bracket_char << " 0%"; } else std::cout << "0%"; } update_is_called = true; int perc = 0; // compute percentage, if did not change, do nothing and return perc = progress*100./(n_cycles-1); if (perc < last_perc) return; // update percentage each unit if (perc == last_perc + 1) { // erase the correct number of characters if (perc <= 10) std::cout << "\b\b" << perc << '%'; else if (perc > 10 and perc < 100) std::cout << "\b\b\b" << perc << '%'; else if (perc == 100) std::cout << "\b\b\b" << perc << '%'; } if (do_show_bar == true) { // update bar every ten units if (perc % 2 == 0) { // erase closing bracket std::cout << std::string(closing_bracket_char.size(), '\b'); // erase trailing percentage characters if (perc < 10) std::cout << "\b\b\b"; else if (perc >= 10 && perc < 100) std::cout << "\b\b\b\b"; else if (perc == 100) std::cout << "\b\b\b\b\b"; // erase 'todo_char' for (int j = 0; j < 50-(perc-1)/2; ++j) { std::cout << std::string(todo_char.size(), '\b'); } // add one 
additional 'done_char' if (perc == 0) std::cout << todo_char; else std::cout << done_char; // refill with 'todo_char' for (int j = 0; j < 50-(perc-1)/2-1; ++j) std::cout << todo_char; // readd trailing percentage characters std::cout << closing_bracket_char << ' ' << perc << '%'; } } last_perc = perc; ++progress; std::cout << std::flush; return; } #endif
// The MIT License (MIT) // // Copyright (c) 2019 Luigi Pertoldi // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // ============================================================================ // ___ ___ ___ __ ___ ____ __ __ ___ __ ___ // | |_) | |_) / / \ / /`_ | |_) | |_ ( (` ( (` | |_) / /\ | |_) // |_| |_| \ \_\_/ \_\_/ |_| \ |_|__ _)_) _)_) |_|_) /_/--\ |_| \ // // Very simple progress bar for c++ loops with internal running variable // // Author: Luigi Pertoldi // Created: 3 dic 2016 // // Notes: The bar must be used when there's no other possible source of output // inside the for loop // //# progressbar // //A very simple, header-only, fully customizable, progress bar (with percentage) //for c++ loops. // //Very simple to set up: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... 
the program // } // return 0; //} //``` //![animated gif](.github/example-simple.gif) // //Allows customization: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // bar.set_todo_char(" "); // bar.set_done_char("█"); // bar.set_opening_bracket_char("{"); // bar.set_closing_bracket_char("}"); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... the program // } // return 0; //} //``` //![animated gif](.github/example-custom.gif) // //## Notes // // To use the bar in parallelized loops call `progressbar::update` in a critical // section. With [OpenMP](http://www.openmp.org) this can be achieved with the //following structure: //```cpp // //for ( ... ) { // //bar.update(); //} //``` #ifndef __PROGRESSBAR_HPP #define __PROGRESSBAR_HPP #include <iostream> #include <string> #include <stdexcept> class progressbar { public: // default destructor ~progressbar() = default; // delete everything else progressbar (progressbar const&) = delete; progressbar& operator=(progressbar const&) = delete; progressbar (progressbar&&) = delete; progressbar& operator=(progressbar&&) = delete; // default constructor, must call set_niter later progressbar(); progressbar(int n, bool showbar=true); // reset bar to use it again void reset(); // set number of loop iterations void set_niter(int iter); // chose your style void set_done_char(const std::string& sym) {done_char = sym;} void set_todo_char(const std::string& sym) {todo_char = sym;} void set_opening_bracket_char(const std::string& sym) {opening_bracket_char = sym;} void set_closing_bracket_char(const std::string& sym) {closing_bracket_char = sym;} // to show only the percentage void show_bar(bool flag = true) {do_show_bar = flag;} // main function void update(); private: int progress; int n_cycles; int last_perc; bool do_show_bar; bool update_is_called; std::string done_char; std::string todo_char; std::string opening_bracket_char; std::string closing_bracket_char; }; progressbar::progressbar() : 
progress(0), n_cycles(0), last_perc(0), do_show_bar(true), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} progressbar::progressbar(int n, bool showbar) : progress(0), n_cycles(n), last_perc(0), do_show_bar(showbar), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} void progressbar::reset() { progress = 0, update_is_called = false; last_perc = 0; return; } void progressbar::set_niter(int niter) { if (niter <= 0) throw std::invalid_argument( "progressbar::set_niter: number of iterations null or negative"); n_cycles = niter; return; } void progressbar::update() { if (n_cycles == 0) throw std::runtime_error( "progressbar::update: number of cycles not set"); if (!update_is_called) { if (do_show_bar == true) { std::cout << opening_bracket_char; for (int _ = 0; _ < 50; _++) std::cout << todo_char; std::cout << closing_bracket_char << " 0%"; } else std::cout << "0%"; } update_is_called = true; int perc = 0; // compute percentage, if did not change, do nothing and return perc = progress*100./(n_cycles-1); if (perc < last_perc) return; // update percentage each unit if (perc == last_perc + 1) { // erase the correct number of characters if (perc <= 10) std::cout << "\b\b" << perc << '%'; else if (perc > 10 and perc < 100) std::cout << "\b\b\b" << perc << '%'; else if (perc == 100) std::cout << "\b\b\b" << perc << '%'; } if (do_show_bar == true) { // update bar every ten units if (perc % 2 == 0) { // erase closing bracket std::cout << std::string(closing_bracket_char.size(), '\b'); // erase trailing percentage characters if (perc < 10) std::cout << "\b\b\b"; else if (perc >= 10 && perc < 100) std::cout << "\b\b\b\b"; else if (perc == 100) std::cout << "\b\b\b\b\b"; // erase 'todo_char' for (int j = 0; j < 50-(perc-1)/2; ++j) { std::cout << std::string(todo_char.size(), '\b'); } // add one additional 'done_char' if (perc == 0) std::cout << todo_char; 
else std::cout << done_char; // refill with 'todo_char' for (int j = 0; j < 50-(perc-1)/2-1; ++j) std::cout << todo_char; // readd trailing percentage characters std::cout << closing_bracket_char << ' ' << perc << '%'; } } last_perc = perc; ++progress; std::cout << std::flush; return; } #endif
// The MIT License (MIT) // // Copyright (c) 2019 Luigi Pertoldi // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // ============================================================================ // ___ ___ ___ __ ___ ____ __ __ ___ __ ___ // | |_) | |_) / / \ / /`_ | |_) | |_ ( (` ( (` | |_) / /\ | |_) // |_| |_| \ \_\_/ \_\_/ |_| \ |_|__ _)_) _)_) |_|_) /_/--\ |_| \ // // Very simple progress bar for c++ loops with internal running variable // // Author: Luigi Pertoldi // Created: 3 dic 2016 // // Notes: The bar must be used when there's no other possible source of output // inside the for loop // //# progressbar // //A very simple, header-only, fully customizable, progress bar (with percentage) //for c++ loops. // //Very simple to set up: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... 
the program // } // return 0; //} //``` //![animated gif](.github/example-simple.gif) // //Allows customization: //```cpp //#include "progressbar.hpp" // //int main() { // progressbar bar(100); // bar.set_todo_char(" "); // bar.set_done_char("█"); // bar.set_opening_bracket_char("{"); // bar.set_closing_bracket_char("}"); // for (int i = 0; i < 100; ++i) { // bar.update(); // // ... the program // } // return 0; //} //``` //![animated gif](.github/example-custom.gif) // //## Notes // // To use the bar in parallelized loops call `progressbar::update` in a critical // section. With [OpenMP](http://www.openmp.org) this can be achieved with the //following structure: //```cpp //#pragma omp parallel for //for ( ... ) { //#pragma omp critical //bar.update(); //} //``` #ifndef __PROGRESSBAR_HPP #define __PROGRESSBAR_HPP #include <iostream> #include <string> #include <stdexcept> class progressbar { public: // default destructor ~progressbar() = default; // delete everything else progressbar (progressbar const&) = delete; progressbar& operator=(progressbar const&) = delete; progressbar (progressbar&&) = delete; progressbar& operator=(progressbar&&) = delete; // default constructor, must call set_niter later progressbar(); progressbar(int n, bool showbar=true); // reset bar to use it again void reset(); // set number of loop iterations void set_niter(int iter); // chose your style void set_done_char(const std::string& sym) {done_char = sym;} void set_todo_char(const std::string& sym) {todo_char = sym;} void set_opening_bracket_char(const std::string& sym) {opening_bracket_char = sym;} void set_closing_bracket_char(const std::string& sym) {closing_bracket_char = sym;} // to show only the percentage void show_bar(bool flag = true) {do_show_bar = flag;} // main function void update(); private: int progress; int n_cycles; int last_perc; bool do_show_bar; bool update_is_called; std::string done_char; std::string todo_char; std::string opening_bracket_char; std::string 
closing_bracket_char; }; progressbar::progressbar() : progress(0), n_cycles(0), last_perc(0), do_show_bar(true), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} progressbar::progressbar(int n, bool showbar) : progress(0), n_cycles(n), last_perc(0), do_show_bar(showbar), update_is_called(false), done_char("#"), todo_char(" "), opening_bracket_char("["), closing_bracket_char("]") {} void progressbar::reset() { progress = 0, update_is_called = false; last_perc = 0; return; } void progressbar::set_niter(int niter) { if (niter <= 0) throw std::invalid_argument( "progressbar::set_niter: number of iterations null or negative"); n_cycles = niter; return; } void progressbar::update() { if (n_cycles == 0) throw std::runtime_error( "progressbar::update: number of cycles not set"); if (!update_is_called) { if (do_show_bar == true) { std::cout << opening_bracket_char; for (int _ = 0; _ < 50; _++) std::cout << todo_char; std::cout << closing_bracket_char << " 0%"; } else std::cout << "0%"; } update_is_called = true; int perc = 0; // compute percentage, if did not change, do nothing and return perc = progress*100./(n_cycles-1); if (perc < last_perc) return; // update percentage each unit if (perc == last_perc + 1) { // erase the correct number of characters if (perc <= 10) std::cout << "\b\b" << perc << '%'; else if (perc > 10 and perc < 100) std::cout << "\b\b\b" << perc << '%'; else if (perc == 100) std::cout << "\b\b\b" << perc << '%'; } if (do_show_bar == true) { // update bar every ten units if (perc % 2 == 0) { // erase closing bracket std::cout << std::string(closing_bracket_char.size(), '\b'); // erase trailing percentage characters if (perc < 10) std::cout << "\b\b\b"; else if (perc >= 10 && perc < 100) std::cout << "\b\b\b\b"; else if (perc == 100) std::cout << "\b\b\b\b\b"; // erase 'todo_char' for (int j = 0; j < 50-(perc-1)/2; ++j) { std::cout << std::string(todo_char.size(), '\b'); } // add one 
additional 'done_char' if (perc == 0) std::cout << todo_char; else std::cout << done_char; // refill with 'todo_char' for (int j = 0; j < 50-(perc-1)/2-1; ++j) std::cout << todo_char; // readd trailing percentage characters std::cout << closing_bracket_char << ' ' << perc << '%'; } } last_perc = perc; ++progress; std::cout << std::flush; return; } #endif
project2_Delaunoy_Crasset_EXPLICIT.c
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #include "project2_Delaunoy_Crasset_EXPLICIT.h" #include "project2_Delaunoy_Crasset_IO.h" #define M_PI 3.14159265358979323846 /** * Compute the size of the arrays this process is responsible for * * Parameters: * rank: The rank of the calling process * nbproc: The number of processes * xSize: The discretization along the x axis * size_X: A pointer to an integer that will be set to the x size of eta and v * size_X_u: A pointer to an integer that will be set to the x size of u * size_X: A pointer to an integer that will be set to the x size of h * startval_X_h: A pointer to an integer that will be set to the starting value of h * endval_X_h: A pointer to an integer that will be set to the ending value of h */ void get_array_sizes(int rank, int nbproc, int xSize, int* size_X, int* size_X_u, int* size_X_h, int* startval_X_h, int* endval_X_h){ int mpi_xsize = xSize/nbproc; int startval_X, endval_X; int startval_X_u, endval_X_u; // When there is only one process if(nbproc == 1){ startval_X = 0; endval_X = xSize; *startval_X_h = 0; *endval_X_h = 2*xSize + 2; startval_X_u = 0; endval_X_u = xSize+1; } // When the process is the first else if(rank == 0){ startval_X = 0; endval_X = mpi_xsize; *startval_X_h = 0; *endval_X_h = 2*mpi_xsize + 2; startval_X_u = 0; endval_X_u = mpi_xsize; } // When the process lies in the middle of the matrix else if(rank == nbproc -1){ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize + 1; } // When the process is the last else{ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; 
startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize; } // Add the remaining lines to first processes int remaining = xSize%nbproc; if(rank < remaining){ startval_X += rank; endval_X += rank + 1; startval_X_u += rank; endval_X_u += rank + 1; *startval_X_h += rank * 2; *endval_X_h += (rank + 1) * 2; } else{ *startval_X_h += remaining * 2; *endval_X_h += remaining * 2; } // Set variables *size_X = endval_X - startval_X + 1; *size_X_u = endval_X_u - startval_X_u + 1; *size_X_h = *endval_X_h - *startval_X_h + 1; } /** * Gather results from all process and save to disk * * Parameters: * eta: The eta array of the calling process * u: The u array of the calling process * v: The v array of the calling process * xSize: The discretization size along the x axis * ySize: The discretization size along the y axis * iteration: The iteration at which the save is performed * params: The structure holding the parameters of the run */ void gather_and_save(double** eta, double** u, double** v, int xSize, int ySize, unsigned int iteration, Parameters* params){ // Get process info int nbproc, myrank; MPI_Comm_size(MPI_COMM_WORLD, &nbproc); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); // Get the array sizes int size_X, size_X_u, size_X_h, startval_X_h, endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Get number of threads int openMP_nbthreads = atoi(getenv("OMP_NUM_THREADS")); double* etaTotal; double* uTotal; double* vTotal; // Get process result double* etaPartial = transformMatrixToArray(eta, size_X, ySize +1); double* uPartial = transformMatrixToArray(u, size_X_u, ySize +1); double* vPartial = transformMatrixToArray(v, size_X, ySize +2); if(nbproc != 1){ // Compute the receive counts and displacements vectors int tmp_size_X; int tmp_size_X_u; int tmp_size_X_h; int tmp_startval_X_h; int tmp_endval_X_h; int* recvcounts_eta = malloc(nbproc * sizeof(int)); int* recvcounts_u = malloc(nbproc * sizeof(int)); int* 
recvcounts_v = malloc(nbproc * sizeof(int)); int* disp_eta = malloc(nbproc * sizeof(int)); int* disp_u = malloc(nbproc * sizeof(int)); int* disp_v = malloc(nbproc * sizeof(int)); if(!recvcounts_eta || !recvcounts_u || !recvcounts_v || !disp_eta || !disp_u || !disp_v){ fprintf(stderr, "error malloc recvcounts\n"); MPI_Finalize(); exit(-1); } for(int i = 0; i < nbproc; i++){ get_array_sizes(i, nbproc, xSize, &tmp_size_X, &tmp_size_X_u, &tmp_size_X_h, &tmp_startval_X_h, &tmp_endval_X_h); recvcounts_eta[i] = tmp_size_X * (ySize + 1); recvcounts_u[i] = tmp_size_X_u * (ySize + 1); recvcounts_v[i] = tmp_size_X * (ySize + 2); if(i == 0){ disp_eta[0] = 0; disp_u[0] = 0; disp_v[0] = 0; } if (i < nbproc - 1){ disp_eta[i + 1] = disp_eta[i] + tmp_size_X * (ySize + 1); disp_u[i + 1] = disp_u[i] + tmp_size_X_u * (ySize + 1); disp_v[i + 1] = disp_v[i] + tmp_size_X * (ySize + 2); } } // Gather the results of every process etaTotal = malloc((xSize + 1) * (ySize + 1)* sizeof(double)); uTotal = malloc((xSize + 2) * (ySize + 1)* sizeof(double)); vTotal = malloc((xSize + 1) * (ySize + 2)* sizeof(double)); MPI_Gatherv(etaPartial, (size_X) * (ySize + 1) , MPI_DOUBLE, etaTotal, recvcounts_eta, disp_eta, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(uPartial, (size_X_u) * (ySize + 1) , MPI_DOUBLE, uTotal, recvcounts_u, disp_u, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(vPartial, (size_X) * (ySize + 2) , MPI_DOUBLE, vTotal, recvcounts_v, disp_v, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Free allocated memory free(etaPartial); free(uPartial); free(vPartial); free(recvcounts_eta); free(recvcounts_u); free(recvcounts_v); free(disp_eta); free(disp_u); free(disp_v); // Save results if(myrank == 0){ saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } } // In case there is only one process, save directly else{ etaTotal = transformMatrixToArray(eta, xSize + 1, ySize +1); uTotal = transformMatrixToArray(u, xSize + 2, ySize +1); vTotal= transformMatrixToArray(v, 
xSize + 1, ySize +2); saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } // Free allocated memory free(etaTotal); free(uTotal); free(vTotal); } /** * Solve the Navier-Stockes equations using explicit Euler method * * Parameters: * map: A structure containing the map infos * params: The parameters of the run * eta: A pointer to a matrix that will be set to the result of eta * u: A pointer to a matrix that will be set to the result of u * v: A pointer to a matrix that will be set to the result of v * * Returns: * An integer indicating whether the algorithm run with success or not */ int eulerExplicitMPI(Map* map, Parameters* params, double*** eta, double*** u, double*** v){ assert(map); assert(params); // Get process info int nbproc, myrank ; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &nbproc); // Compute discretization size int xSize = (int)(map->a / params->deltaX); int ySize = (int)(map->b / params->deltaY); // Compute array sizes int size_X; int size_X_u; int size_X_h; int startval_X_h; int endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Allocate memory // eta in {0, 1, ..., a/dx}X{0, 1, ..., b/dy} double** etaCurr = allocateDoubleMatrix(size_X, ySize + 1); if(!etaCurr){ return -1; } double** etaNext = allocateDoubleMatrix(size_X, ySize + 1); if(!etaNext){ freeDoubleMatrix(etaCurr, size_X); return -1; } // u in {-1/2, 1/2, ..., a/dx + 1/2}X{0, 1, ..., b/dy} double** uCurr = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uCurr){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); return -1; } double** uNext = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uNext){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); freeDoubleMatrix(uCurr, size_X_u); return -1; } // v in {0, 1, .., a/dx}X{-1/2, 1/2, ..., b/dy + 1/2} double** vCurr = allocateDoubleMatrix(size_X, ySize + 2); if(!vCurr){ 
freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); return -1; } double** vNext = allocateDoubleMatrix(size_X, ySize + 2); if(!vNext){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); return -1; } // h in {-1/2, 0, 1/2, ..., a/dx, a/dx + 1/2}X{-1/2, 0, 1/2, ..., b/dy, b/dy + 1/2} double** h = allocateDoubleMatrix(size_X_h, 2 * ySize + 3); if(!h){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); freeDoubleMatrix(vNext, size_X); return -1; } // Compute h from the provided map file for(int i = startval_X_h; i <= endval_X_h; i++){ for(int j = 0; j < 2 * ySize + 3; j++){ h[i-startval_X_h][j] = getGridValueAtDomainCoordinates(map, ((float)(i * xSize)/(xSize + 1)) * (params->deltaX / 2), ((float)(j * ySize)/(ySize + 1)) * (params->deltaY / 2)); } } // Initialize arrays #pragma omp parallel default(shared) { #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++){ etaCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X_u; i++){ for(int j = 0; j < ySize; j++){ uCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++) vCurr[i][j] = 0; } } // Alocate arrays for receiving data from other process double* uReceived = malloc((ySize + 1) * sizeof(double)); double* etaReceived = malloc((ySize + 1) * sizeof(double)); // Starting time loop for(unsigned int t = 1; t <= params->TMax/params->deltaT; t++){ if(myrank == 0){ fprintf(stderr, "in loop t = %u\n", t); } // In a multiprocess environment, sending the leftmost column of u of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ 
if(myrank == nbproc-1){ MPI_Send(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, MPI_COMM_WORLD); //Tag 62 is for u }else if (myrank == 0){ MPI_Recv(uReceived, ySize + 1, MPI_DOUBLE, 1, 62, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, uReceived, ySize + 1, MPI_DOUBLE, myrank + 1, 62,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // Compute the next value of eta #pragma omp parallel default(shared) { // Process etaNext in one block if(myrank == nbproc-1 || nbproc == 1){ #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } } else{ // Process the last column separately from the rest because we need to use uReceived from the // the process with higher rank #pragma omp for schedule(static) for(int i = 0; i < size_X - 1; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } #pragma omp for schedule(static) for(int j = 0; j < ySize + 1; j++){ etaNext[size_X-1][j] = (-(h[2*(size_X-1)+2][2*j+1] * uReceived[j] - h[2*(size_X-1)][2*j+1] * uCurr[size_X-1][j]) / params->deltaX -(h[2*(size_X-1)+1][2*j+2] * vCurr[size_X-1][j+1] - h[2*(size_X-1)+1][2*j] * vCurr[size_X-1][j]) / params->deltaY) * params->deltaT + etaCurr[size_X-1][j]; } } } // In a multiprocess environment, sending the rightmost column of eta of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ if(myrank == 0){ MPI_Send(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, 1, 42, MPI_COMM_WORLD); //Tag 42 is for eta }else if (myrank == nbproc -1){ 
MPI_Recv(etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, myrank + 1, 42, etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // uNext Boundary conditions if(myrank == 0 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[0][i] = 0; } } if(myrank == nbproc -1 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[size_X_u - 1][i] = 0; } } // Compute the next value of u #pragma omp parallel default(shared) { // Process uNext in one block if(nbproc == 1){ #pragma omp for schedule(static) for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == 0){ #pragma omp for schedule(static) for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == nbproc-1){ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank // The last process has a smaller size along the x axis #pragma omp for schedule(static) for(int j = 0; j < ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } #pragma omp for schedule(static) for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else{ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank #pragma omp for schedule(static) for(int j = 0; j < 
ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } #pragma omp for schedule(static) for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } } // Boundary conditions for v for(int i = 0; i < size_X; i++) vNext[i][0] = 0; // Setting the excitation on the rightmost column of the whole domain space for(int i = 0; i < size_X; i++){ if(params->s == 0) //Sinusoidal excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT); else // Exponentially decaying excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT) * exp(- t * params->deltaT / 500); } // Compute the next value of v #pragma omp parallel default(shared) { #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 1; j < ySize + 1; j++){ vNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i][j-1]) / params->deltaY -params->gamma * vCurr[i][j]) * params->deltaT + vCurr[i][j]; } } } // Process 0 gathers the sub-matrices of the processes and saves them to disk if(params->S != 0 && t % params->S == 0){ gather_and_save(etaNext,uNext,vNext, xSize,ySize, t, params); } // Go to next step double** tmp; tmp = etaCurr; etaCurr = etaNext; etaNext = tmp; tmp = uCurr; uCurr = uNext; uNext = tmp; tmp = vCurr; vCurr = vNext; vNext = tmp; } // Return values *eta = etaCurr; *u = uCurr; *v = vCurr; freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vNext, size_X); freeDoubleMatrix((double**) h, size_X_h); free(uReceived); free(etaReceived); return 0; }
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #include "project2_Delaunoy_Crasset_EXPLICIT.h" #include "project2_Delaunoy_Crasset_IO.h" #define M_PI 3.14159265358979323846 /** * Compute the size of the arrays this process is responsible for * * Parameters: * rank: The rank of the calling process * nbproc: The number of processes * xSize: The discretization along the x axis * size_X: A pointer to an integer that will be set to the x size of eta and v * size_X_u: A pointer to an integer that will be set to the x size of u * size_X: A pointer to an integer that will be set to the x size of h * startval_X_h: A pointer to an integer that will be set to the starting value of h * endval_X_h: A pointer to an integer that will be set to the ending value of h */ void get_array_sizes(int rank, int nbproc, int xSize, int* size_X, int* size_X_u, int* size_X_h, int* startval_X_h, int* endval_X_h){ int mpi_xsize = xSize/nbproc; int startval_X, endval_X; int startval_X_u, endval_X_u; // When there is only one process if(nbproc == 1){ startval_X = 0; endval_X = xSize; *startval_X_h = 0; *endval_X_h = 2*xSize + 2; startval_X_u = 0; endval_X_u = xSize+1; } // When the process is the first else if(rank == 0){ startval_X = 0; endval_X = mpi_xsize; *startval_X_h = 0; *endval_X_h = 2*mpi_xsize + 2; startval_X_u = 0; endval_X_u = mpi_xsize; } // When the process lies in the middle of the matrix else if(rank == nbproc -1){ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize + 1; } // When the process is the last else{ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; 
startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize; } // Add the remaining lines to first processes int remaining = xSize%nbproc; if(rank < remaining){ startval_X += rank; endval_X += rank + 1; startval_X_u += rank; endval_X_u += rank + 1; *startval_X_h += rank * 2; *endval_X_h += (rank + 1) * 2; } else{ *startval_X_h += remaining * 2; *endval_X_h += remaining * 2; } // Set variables *size_X = endval_X - startval_X + 1; *size_X_u = endval_X_u - startval_X_u + 1; *size_X_h = *endval_X_h - *startval_X_h + 1; } /** * Gather results from all process and save to disk * * Parameters: * eta: The eta array of the calling process * u: The u array of the calling process * v: The v array of the calling process * xSize: The discretization size along the x axis * ySize: The discretization size along the y axis * iteration: The iteration at which the save is performed * params: The structure holding the parameters of the run */ void gather_and_save(double** eta, double** u, double** v, int xSize, int ySize, unsigned int iteration, Parameters* params){ // Get process info int nbproc, myrank; MPI_Comm_size(MPI_COMM_WORLD, &nbproc); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); // Get the array sizes int size_X, size_X_u, size_X_h, startval_X_h, endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Get number of threads int openMP_nbthreads = atoi(getenv("OMP_NUM_THREADS")); double* etaTotal; double* uTotal; double* vTotal; // Get process result double* etaPartial = transformMatrixToArray(eta, size_X, ySize +1); double* uPartial = transformMatrixToArray(u, size_X_u, ySize +1); double* vPartial = transformMatrixToArray(v, size_X, ySize +2); if(nbproc != 1){ // Compute the receive counts and displacements vectors int tmp_size_X; int tmp_size_X_u; int tmp_size_X_h; int tmp_startval_X_h; int tmp_endval_X_h; int* recvcounts_eta = malloc(nbproc * sizeof(int)); int* recvcounts_u = malloc(nbproc * sizeof(int)); int* 
recvcounts_v = malloc(nbproc * sizeof(int)); int* disp_eta = malloc(nbproc * sizeof(int)); int* disp_u = malloc(nbproc * sizeof(int)); int* disp_v = malloc(nbproc * sizeof(int)); if(!recvcounts_eta || !recvcounts_u || !recvcounts_v || !disp_eta || !disp_u || !disp_v){ fprintf(stderr, "error malloc recvcounts\n"); MPI_Finalize(); exit(-1); } for(int i = 0; i < nbproc; i++){ get_array_sizes(i, nbproc, xSize, &tmp_size_X, &tmp_size_X_u, &tmp_size_X_h, &tmp_startval_X_h, &tmp_endval_X_h); recvcounts_eta[i] = tmp_size_X * (ySize + 1); recvcounts_u[i] = tmp_size_X_u * (ySize + 1); recvcounts_v[i] = tmp_size_X * (ySize + 2); if(i == 0){ disp_eta[0] = 0; disp_u[0] = 0; disp_v[0] = 0; } if (i < nbproc - 1){ disp_eta[i + 1] = disp_eta[i] + tmp_size_X * (ySize + 1); disp_u[i + 1] = disp_u[i] + tmp_size_X_u * (ySize + 1); disp_v[i + 1] = disp_v[i] + tmp_size_X * (ySize + 2); } } // Gather the results of every process etaTotal = malloc((xSize + 1) * (ySize + 1)* sizeof(double)); uTotal = malloc((xSize + 2) * (ySize + 1)* sizeof(double)); vTotal = malloc((xSize + 1) * (ySize + 2)* sizeof(double)); MPI_Gatherv(etaPartial, (size_X) * (ySize + 1) , MPI_DOUBLE, etaTotal, recvcounts_eta, disp_eta, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(uPartial, (size_X_u) * (ySize + 1) , MPI_DOUBLE, uTotal, recvcounts_u, disp_u, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(vPartial, (size_X) * (ySize + 2) , MPI_DOUBLE, vTotal, recvcounts_v, disp_v, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Free allocated memory free(etaPartial); free(uPartial); free(vPartial); free(recvcounts_eta); free(recvcounts_u); free(recvcounts_v); free(disp_eta); free(disp_u); free(disp_v); // Save results if(myrank == 0){ saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } } // In case there is only one process, save directly else{ etaTotal = transformMatrixToArray(eta, xSize + 1, ySize +1); uTotal = transformMatrixToArray(u, xSize + 2, ySize +1); vTotal= transformMatrixToArray(v, 
xSize + 1, ySize +2); saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } // Free allocated memory free(etaTotal); free(uTotal); free(vTotal); } /** * Solve the Navier-Stockes equations using explicit Euler method * * Parameters: * map: A structure containing the map infos * params: The parameters of the run * eta: A pointer to a matrix that will be set to the result of eta * u: A pointer to a matrix that will be set to the result of u * v: A pointer to a matrix that will be set to the result of v * * Returns: * An integer indicating whether the algorithm run with success or not */ int eulerExplicitMPI(Map* map, Parameters* params, double*** eta, double*** u, double*** v){ assert(map); assert(params); // Get process info int nbproc, myrank ; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &nbproc); // Compute discretization size int xSize = (int)(map->a / params->deltaX); int ySize = (int)(map->b / params->deltaY); // Compute array sizes int size_X; int size_X_u; int size_X_h; int startval_X_h; int endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Allocate memory // eta in {0, 1, ..., a/dx}X{0, 1, ..., b/dy} double** etaCurr = allocateDoubleMatrix(size_X, ySize + 1); if(!etaCurr){ return -1; } double** etaNext = allocateDoubleMatrix(size_X, ySize + 1); if(!etaNext){ freeDoubleMatrix(etaCurr, size_X); return -1; } // u in {-1/2, 1/2, ..., a/dx + 1/2}X{0, 1, ..., b/dy} double** uCurr = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uCurr){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); return -1; } double** uNext = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uNext){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); freeDoubleMatrix(uCurr, size_X_u); return -1; } // v in {0, 1, .., a/dx}X{-1/2, 1/2, ..., b/dy + 1/2} double** vCurr = allocateDoubleMatrix(size_X, ySize + 2); if(!vCurr){ 
freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); return -1; } double** vNext = allocateDoubleMatrix(size_X, ySize + 2); if(!vNext){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); return -1; } // h in {-1/2, 0, 1/2, ..., a/dx, a/dx + 1/2}X{-1/2, 0, 1/2, ..., b/dy, b/dy + 1/2} double** h = allocateDoubleMatrix(size_X_h, 2 * ySize + 3); if(!h){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); freeDoubleMatrix(vNext, size_X); return -1; } // Compute h from the provided map file for(int i = startval_X_h; i <= endval_X_h; i++){ for(int j = 0; j < 2 * ySize + 3; j++){ h[i-startval_X_h][j] = getGridValueAtDomainCoordinates(map, ((float)(i * xSize)/(xSize + 1)) * (params->deltaX / 2), ((float)(j * ySize)/(ySize + 1)) * (params->deltaY / 2)); } } // Initialize arrays for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++){ etaCurr[i][j] = 0; } } for(int i = 0; i < size_X_u; i++){ for(int j = 0; j < ySize; j++){ uCurr[i][j] = 0; } } for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++) vCurr[i][j] = 0; } // Alocate arrays for receiving data from other process double* uReceived = malloc((ySize + 1) * sizeof(double)); double* etaReceived = malloc((ySize + 1) * sizeof(double)); // Starting time loop for(unsigned int t = 1; t <= params->TMax/params->deltaT; t++){ if(myrank == 0){ fprintf(stderr, "in loop t = %u\n", t); } // In a multiprocess environment, sending the leftmost column of u of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ if(myrank == nbproc-1){ MPI_Send(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, MPI_COMM_WORLD); //Tag 62 is for u }else if (myrank == 
0){ MPI_Recv(uReceived, ySize + 1, MPI_DOUBLE, 1, 62, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, uReceived, ySize + 1, MPI_DOUBLE, myrank + 1, 62,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // Compute the next value of eta // Process etaNext in one block if(myrank == nbproc-1 || nbproc == 1){ for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } } else{ // Process the last column separately from the rest because we need to use uReceived from the // the process with higher rank for(int i = 0; i < size_X - 1; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } for(int j = 0; j < ySize + 1; j++){ etaNext[size_X-1][j] = (-(h[2*(size_X-1)+2][2*j+1] * uReceived[j] - h[2*(size_X-1)][2*j+1] * uCurr[size_X-1][j]) / params->deltaX -(h[2*(size_X-1)+1][2*j+2] * vCurr[size_X-1][j+1] - h[2*(size_X-1)+1][2*j] * vCurr[size_X-1][j]) / params->deltaY) * params->deltaT + etaCurr[size_X-1][j]; } } // In a multiprocess environment, sending the rightmost column of eta of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ if(myrank == 0){ MPI_Send(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, 1, 42, MPI_COMM_WORLD); //Tag 42 is for eta }else if (myrank == nbproc -1){ MPI_Recv(etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, myrank + 1, 42, etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // uNext Boundary 
conditions if(myrank == 0 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[0][i] = 0; } } if(myrank == nbproc -1 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[size_X_u - 1][i] = 0; } } // Compute the next value of u // Process uNext in one block if(nbproc == 1){ for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == 0){ for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == nbproc-1){ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank // The last process has a smaller size along the x axis for(int j = 0; j < ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else{ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank for(int j = 0; j < ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } // Boundary conditions for v for(int i = 0; i < size_X; i++) vNext[i][0] = 0; // Setting the excitation on the 
rightmost column of the whole domain space for(int i = 0; i < size_X; i++){ if(params->s == 0) //Sinusoidal excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT); else // Exponentially decaying excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT) * exp(- t * params->deltaT / 500); } // Compute the next value of v for(int i = 0; i < size_X; i++){ for(int j = 1; j < ySize + 1; j++){ vNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i][j-1]) / params->deltaY -params->gamma * vCurr[i][j]) * params->deltaT + vCurr[i][j]; } } // Process 0 gathers the sub-matrices of the processes and saves them to disk if(params->S != 0 && t % params->S == 0){ gather_and_save(etaNext,uNext,vNext, xSize,ySize, t, params); } // Go to next step double** tmp; tmp = etaCurr; etaCurr = etaNext; etaNext = tmp; tmp = uCurr; uCurr = uNext; uNext = tmp; tmp = vCurr; vCurr = vNext; vNext = tmp; } // Return values *eta = etaCurr; *u = uCurr; *v = vCurr; freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vNext, size_X); freeDoubleMatrix((double**) h, size_X_h); free(uReceived); free(etaReceived); return 0; }
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #include "project2_Delaunoy_Crasset_EXPLICIT.h" #include "project2_Delaunoy_Crasset_IO.h" #define M_PI 3.14159265358979323846 /** * Compute the size of the arrays this process is responsible for * * Parameters: * rank: The rank of the calling process * nbproc: The number of processes * xSize: The discretization along the x axis * size_X: A pointer to an integer that will be set to the x size of eta and v * size_X_u: A pointer to an integer that will be set to the x size of u * size_X: A pointer to an integer that will be set to the x size of h * startval_X_h: A pointer to an integer that will be set to the starting value of h * endval_X_h: A pointer to an integer that will be set to the ending value of h */ void get_array_sizes(int rank, int nbproc, int xSize, int* size_X, int* size_X_u, int* size_X_h, int* startval_X_h, int* endval_X_h){ int mpi_xsize = xSize/nbproc; int startval_X, endval_X; int startval_X_u, endval_X_u; // When there is only one process if(nbproc == 1){ startval_X = 0; endval_X = xSize; *startval_X_h = 0; *endval_X_h = 2*xSize + 2; startval_X_u = 0; endval_X_u = xSize+1; } // When the process is the first else if(rank == 0){ startval_X = 0; endval_X = mpi_xsize; *startval_X_h = 0; *endval_X_h = 2*mpi_xsize + 2; startval_X_u = 0; endval_X_u = mpi_xsize; } // When the process lies in the middle of the matrix else if(rank == nbproc -1){ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize + 1; } // When the process is the last else{ startval_X = rank * mpi_xsize + 1; endval_X = (rank+1) * mpi_xsize; *startval_X_h = 2 * rank * mpi_xsize + 2; *endval_X_h = 2 * (rank+1) * mpi_xsize + 2; 
startval_X_u = rank * mpi_xsize + 1; endval_X_u = (rank+1) * mpi_xsize; } // Add the remaining lines to first processes int remaining = xSize%nbproc; if(rank < remaining){ startval_X += rank; endval_X += rank + 1; startval_X_u += rank; endval_X_u += rank + 1; *startval_X_h += rank * 2; *endval_X_h += (rank + 1) * 2; } else{ *startval_X_h += remaining * 2; *endval_X_h += remaining * 2; } // Set variables *size_X = endval_X - startval_X + 1; *size_X_u = endval_X_u - startval_X_u + 1; *size_X_h = *endval_X_h - *startval_X_h + 1; } /** * Gather results from all process and save to disk * * Parameters: * eta: The eta array of the calling process * u: The u array of the calling process * v: The v array of the calling process * xSize: The discretization size along the x axis * ySize: The discretization size along the y axis * iteration: The iteration at which the save is performed * params: The structure holding the parameters of the run */ void gather_and_save(double** eta, double** u, double** v, int xSize, int ySize, unsigned int iteration, Parameters* params){ // Get process info int nbproc, myrank; MPI_Comm_size(MPI_COMM_WORLD, &nbproc); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); // Get the array sizes int size_X, size_X_u, size_X_h, startval_X_h, endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Get number of threads int openMP_nbthreads = atoi(getenv("OMP_NUM_THREADS")); double* etaTotal; double* uTotal; double* vTotal; // Get process result double* etaPartial = transformMatrixToArray(eta, size_X, ySize +1); double* uPartial = transformMatrixToArray(u, size_X_u, ySize +1); double* vPartial = transformMatrixToArray(v, size_X, ySize +2); if(nbproc != 1){ // Compute the receive counts and displacements vectors int tmp_size_X; int tmp_size_X_u; int tmp_size_X_h; int tmp_startval_X_h; int tmp_endval_X_h; int* recvcounts_eta = malloc(nbproc * sizeof(int)); int* recvcounts_u = malloc(nbproc * sizeof(int)); int* 
recvcounts_v = malloc(nbproc * sizeof(int)); int* disp_eta = malloc(nbproc * sizeof(int)); int* disp_u = malloc(nbproc * sizeof(int)); int* disp_v = malloc(nbproc * sizeof(int)); if(!recvcounts_eta || !recvcounts_u || !recvcounts_v || !disp_eta || !disp_u || !disp_v){ fprintf(stderr, "error malloc recvcounts\n"); MPI_Finalize(); exit(-1); } for(int i = 0; i < nbproc; i++){ get_array_sizes(i, nbproc, xSize, &tmp_size_X, &tmp_size_X_u, &tmp_size_X_h, &tmp_startval_X_h, &tmp_endval_X_h); recvcounts_eta[i] = tmp_size_X * (ySize + 1); recvcounts_u[i] = tmp_size_X_u * (ySize + 1); recvcounts_v[i] = tmp_size_X * (ySize + 2); if(i == 0){ disp_eta[0] = 0; disp_u[0] = 0; disp_v[0] = 0; } if (i < nbproc - 1){ disp_eta[i + 1] = disp_eta[i] + tmp_size_X * (ySize + 1); disp_u[i + 1] = disp_u[i] + tmp_size_X_u * (ySize + 1); disp_v[i + 1] = disp_v[i] + tmp_size_X * (ySize + 2); } } // Gather the results of every process etaTotal = malloc((xSize + 1) * (ySize + 1)* sizeof(double)); uTotal = malloc((xSize + 2) * (ySize + 1)* sizeof(double)); vTotal = malloc((xSize + 1) * (ySize + 2)* sizeof(double)); MPI_Gatherv(etaPartial, (size_X) * (ySize + 1) , MPI_DOUBLE, etaTotal, recvcounts_eta, disp_eta, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(uPartial, (size_X_u) * (ySize + 1) , MPI_DOUBLE, uTotal, recvcounts_u, disp_u, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gatherv(vPartial, (size_X) * (ySize + 2) , MPI_DOUBLE, vTotal, recvcounts_v, disp_v, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Free allocated memory free(etaPartial); free(uPartial); free(vPartial); free(recvcounts_eta); free(recvcounts_u); free(recvcounts_v); free(disp_eta); free(disp_u); free(disp_v); // Save results if(myrank == 0){ saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } } // In case there is only one process, save directly else{ etaTotal = transformMatrixToArray(eta, xSize + 1, ySize +1); uTotal = transformMatrixToArray(u, xSize + 2, ySize +1); vTotal= transformMatrixToArray(v, 
xSize + 1, ySize +2); saveToDisk(etaTotal, uTotal, vTotal, xSize, ySize, iteration, params, nbproc, openMP_nbthreads); } // Free allocated memory free(etaTotal); free(uTotal); free(vTotal); } /** * Solve the Navier-Stockes equations using explicit Euler method * * Parameters: * map: A structure containing the map infos * params: The parameters of the run * eta: A pointer to a matrix that will be set to the result of eta * u: A pointer to a matrix that will be set to the result of u * v: A pointer to a matrix that will be set to the result of v * * Returns: * An integer indicating whether the algorithm run with success or not */ int eulerExplicitMPI(Map* map, Parameters* params, double*** eta, double*** u, double*** v){ assert(map); assert(params); // Get process info int nbproc, myrank ; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &nbproc); // Compute discretization size int xSize = (int)(map->a / params->deltaX); int ySize = (int)(map->b / params->deltaY); // Compute array sizes int size_X; int size_X_u; int size_X_h; int startval_X_h; int endval_X_h; get_array_sizes(myrank, nbproc, xSize, &size_X, &size_X_u, &size_X_h, &startval_X_h, &endval_X_h); // Allocate memory // eta in {0, 1, ..., a/dx}X{0, 1, ..., b/dy} double** etaCurr = allocateDoubleMatrix(size_X, ySize + 1); if(!etaCurr){ return -1; } double** etaNext = allocateDoubleMatrix(size_X, ySize + 1); if(!etaNext){ freeDoubleMatrix(etaCurr, size_X); return -1; } // u in {-1/2, 1/2, ..., a/dx + 1/2}X{0, 1, ..., b/dy} double** uCurr = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uCurr){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); return -1; } double** uNext = allocateDoubleMatrix(size_X_u, ySize + 1); if(!uNext){ freeDoubleMatrix(etaCurr,size_X); freeDoubleMatrix(etaNext,size_X); freeDoubleMatrix(uCurr, size_X_u); return -1; } // v in {0, 1, .., a/dx}X{-1/2, 1/2, ..., b/dy + 1/2} double** vCurr = allocateDoubleMatrix(size_X, ySize + 2); if(!vCurr){ 
freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); return -1; } double** vNext = allocateDoubleMatrix(size_X, ySize + 2); if(!vNext){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); return -1; } // h in {-1/2, 0, 1/2, ..., a/dx, a/dx + 1/2}X{-1/2, 0, 1/2, ..., b/dy, b/dy + 1/2} double** h = allocateDoubleMatrix(size_X_h, 2 * ySize + 3); if(!h){ freeDoubleMatrix(etaCurr, size_X); freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uCurr, size_X_u); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vCurr, size_X); freeDoubleMatrix(vNext, size_X); return -1; } // Compute h from the provided map file for(int i = startval_X_h; i <= endval_X_h; i++){ for(int j = 0; j < 2 * ySize + 3; j++){ h[i-startval_X_h][j] = getGridValueAtDomainCoordinates(map, ((float)(i * xSize)/(xSize + 1)) * (params->deltaX / 2), ((float)(j * ySize)/(ySize + 1)) * (params->deltaY / 2)); } } // Initialize arrays #pragma omp parallel default(shared) { #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++){ etaCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X_u; i++){ for(int j = 0; j < ySize; j++){ uCurr[i][j] = 0; } } #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize; j++) vCurr[i][j] = 0; } } // Alocate arrays for receiving data from other process double* uReceived = malloc((ySize + 1) * sizeof(double)); double* etaReceived = malloc((ySize + 1) * sizeof(double)); // Starting time loop for(unsigned int t = 1; t <= params->TMax/params->deltaT; t++){ if(myrank == 0){ fprintf(stderr, "in loop t = %u\n", t); } // In a multiprocess environment, sending the leftmost column of u of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ 
if(myrank == nbproc-1){ MPI_Send(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, MPI_COMM_WORLD); //Tag 62 is for u }else if (myrank == 0){ MPI_Recv(uReceived, ySize + 1, MPI_DOUBLE, 1, 62, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(uCurr[0], ySize + 1, MPI_DOUBLE, myrank - 1, 62, uReceived, ySize + 1, MPI_DOUBLE, myrank + 1, 62,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // Compute the next value of eta #pragma omp parallel default(shared) { // Process etaNext in one block if(myrank == nbproc-1 || nbproc == 1){ #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } } else{ // Process the last column separately from the rest because we need to use uReceived from the // the process with higher rank #pragma omp for schedule(static) for(int i = 0; i < size_X - 1; i++){ for(int j = 0; j < ySize + 1; j++){ etaNext[i][j] = (-(h[2*i+2][2*j+1] * uCurr[i+1][j] - h[2*i][2*j+1] * uCurr[i][j]) / params->deltaX -(h[2*i+1][2*j+2] * vCurr[i][j+1] - h[2*i+1][2*j] * vCurr[i][j]) / params->deltaY) * params->deltaT + etaCurr[i][j]; } } #pragma omp for schedule(static) for(int j = 0; j < ySize + 1; j++){ etaNext[size_X-1][j] = (-(h[2*(size_X-1)+2][2*j+1] * uReceived[j] - h[2*(size_X-1)][2*j+1] * uCurr[size_X-1][j]) / params->deltaX -(h[2*(size_X-1)+1][2*j+2] * vCurr[size_X-1][j+1] - h[2*(size_X-1)+1][2*j] * vCurr[size_X-1][j]) / params->deltaY) * params->deltaT + etaCurr[size_X-1][j]; } } } // In a multiprocess environment, sending the rightmost column of eta of the domain controlled // by the current process to the process with the previous rank if(nbproc != 1){ if(myrank == 0){ MPI_Send(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, 1, 42, MPI_COMM_WORLD); //Tag 42 is for eta }else if (myrank == nbproc -1){ 
MPI_Recv(etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE); }else{ MPI_Sendrecv(etaCurr[size_X-1], ySize + 1, MPI_DOUBLE, myrank + 1, 42, etaReceived, ySize + 1, MPI_DOUBLE, myrank - 1, 42,MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } // uNext Boundary conditions if(myrank == 0 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[0][i] = 0; } } if(myrank == nbproc -1 || nbproc == 1){ for(int i = 0; i < ySize + 1; i++){ uNext[size_X_u - 1][i] = 0; } } // Compute the next value of u #pragma omp parallel default(shared) { // Process uNext in one block if(nbproc == 1){ #pragma omp for schedule(static) for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == 0){ #pragma omp for schedule(static) for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else if(myrank == nbproc-1){ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank // The last process has a smaller size along the x axis #pragma omp for schedule(static) for(int j = 0; j < ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } #pragma omp for schedule(static) for(int i = 1; i < size_X_u-1; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } else{ // Process the first column separately from the rest because we need to use etaReceived from the // the process with lower rank #pragma omp for schedule(static) for(int j = 0; j < 
ySize + 1; j++){ uNext[0][j] = (-params->g * (etaCurr[0][j] - etaReceived[j]) / params->deltaX -params->gamma * uCurr[0][j]) * params->deltaT + uCurr[0][j]; } #pragma omp for schedule(static) for(int i = 1; i < size_X_u; i++){ for(int j = 0; j < ySize + 1; j++){ uNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i-1][j]) / params->deltaX -params->gamma * uCurr[i][j]) * params->deltaT + uCurr[i][j]; } } } } // Boundary conditions for v for(int i = 0; i < size_X; i++) vNext[i][0] = 0; // Setting the excitation on the rightmost column of the whole domain space for(int i = 0; i < size_X; i++){ if(params->s == 0) //Sinusoidal excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT); else // Exponentially decaying excitation vNext[i][ySize+1] = params->A * sin(2 * M_PI * params->f * t * params->deltaT) * exp(- t * params->deltaT / 500); } // Compute the next value of v #pragma omp parallel default(shared) { #pragma omp for schedule(static) for(int i = 0; i < size_X; i++){ for(int j = 1; j < ySize + 1; j++){ vNext[i][j] = (-params->g * (etaCurr[i][j] - etaCurr[i][j-1]) / params->deltaY -params->gamma * vCurr[i][j]) * params->deltaT + vCurr[i][j]; } } } // Process 0 gathers the sub-matrices of the processes and saves them to disk if(params->S != 0 && t % params->S == 0){ gather_and_save(etaNext,uNext,vNext, xSize,ySize, t, params); } // Go to next step double** tmp; tmp = etaCurr; etaCurr = etaNext; etaNext = tmp; tmp = uCurr; uCurr = uNext; uNext = tmp; tmp = vCurr; vCurr = vNext; vNext = tmp; } // Return values *eta = etaCurr; *u = uCurr; *v = vCurr; freeDoubleMatrix(etaNext, size_X); freeDoubleMatrix(uNext, size_X_u); freeDoubleMatrix(vNext, size_X); freeDoubleMatrix((double**) h, size_X_h); free(uReceived); free(etaReceived); return 0; }
conv_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: quanwang@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include "conv_kernel_x86.h" #include "wino_conv_kernel_x86.h" #if __AVX__ #include <immintrin.h> #endif #ifndef _MSC_VER #include <sys/time.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? 
(a) : (b)) #endif static int get_private_mem_size(struct ir_tensor* filter) { if (filter->data_type == TENGINE_DT_UINT8) // simulator uint8 inference with fp32 return filter->elem_num * filter->elem_size * 4; else return filter->elem_num * filter->elem_size; // caution } static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info) { /* simply copy the data */ memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size); } static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info) { /* dequant uint8 weight to fp32 for simulator */ float* weight_fp32 = (float* )priv_info->interleave_buffer; uint8_t* weight_uint8 = (uint8_t*)filter->data; float scale = filter->scale; int zero_point = filter->zero_point; for (int i = 0; i < filter->elem_num; i++) { weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale; } } void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw) { const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; *(out++) = *in; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* 
input_tensor, struct ir_tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; float scale = input_tensor->scale; int zero_point = input_tensor->zero_point; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; float in_fp32 = ((float)in[0] - (float)zero_point) * scale; out[0] = in_fp32; out++; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_int8(int8_t* data_img, int8_t* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = 
output_tensor->dims[3]; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; int8_t * out = data_col + (c * outh + h) * outw; const int8_t * end = out + w_high; if (im_row >= 0 && im_row < inh) { int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(int8_t)); out += w_low; while (out < end) { in += sw; out[0] = in[0]; out++; } memset(out, 0, (outw - w_high) * sizeof(int8_t)); } else { memset(out, 0, outw * sizeof(int8_t)); } } } } static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size); void* im2col_buf = (void*)priv_info->im2col_buffer; if (input->data_type == TENGINE_DT_FP32) { im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w); } else if (input->data_type == TENGINE_DT_UINT8) { im2col_uint8(input_base, im2col_buf, input, output, param); } else if (input->data_type == TENGINE_DT_INT8) { im2col_int8(input_base, im2col_buf, input, output, param); } else { printf("Input data type %d not to be supported.\n", input->data_type); } } void input_pack4_fp32(int 
K, int N, float* pB, float* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img = pB + i; float* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { #if __AVX__ _mm256_storeu_ps(tmp, _mm256_loadu_ps(img)); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; #endif // __SSE__ tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float* img = pB + i; float* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; float* output4 = pC + (i + 4) * N; float* output5 = pC + (i + 5) * N; float* output6 = pC + (i + 6) * N; float* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); __m256 _sum4 = _mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = 
_mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); 
_va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 
7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 
0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; 
for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // 
sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 
_vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { float* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); 
__m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif // __AVX__ output += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < K; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } #ifdef _WIN32 float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3]; #else float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #endif #else float sum0 = 0.f; #endif // __AVX__ for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] 
#pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = 
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, 
_va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); _mm256_storeu_si256((__m256i* )output4, _sum4); _mm256_storeu_si256((__m256i* )output5, _sum5); _mm256_storeu_si256((__m256i* )output6, _sum6); _mm256_storeu_si256((__m256i* )output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i 
_vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; 
output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = 
_mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = 
remain_outch_start; i < M; i++) { int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i* )output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0.f; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int 
kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = 
output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void 
sgemm_int8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t * bias_int32 = NULL; if (bias) bias_int32 = ( int* )bias->data + outchan_g * group; float input_scale = input->scale; float* kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t* filter_sgemm = interleave_int8; int8_t* input_sgemm_pack4 = im2col_pack4_int8; int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t)); float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * 
out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; if (output_sgemm_fp32[output_off] > 6) output_sgemm_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ for (int i = 0; i < outchan_g; i++) { #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_sgemm_int32); sys_free(output_sgemm_fp32); } /* check the conv wheather need to be using winograd */ static int winograd_support(struct conv_param* param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16) return 0; return 1; } int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param) { int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int output_xy = output->dims[2] * output->dims[3]; int elem_size = 
input->elem_size;

    // simulator uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return elem_size * output_xy * kernel_size;
}

/* Size in bytes of the shared pack4 im2col buffer: K rows packed into
 * 8-column panels (N/8 full panels plus N%8 single columns). */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;

    // simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return (8 * K * (N / 8 + N % 8)) * elem_size;
}

/* Size in bytes of the interleaved (pack4) kernel buffer: M rows grouped
 * as 8-row panels, then 4-row panels, then leftover single rows. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    int elem_size = filter->elem_size;

    // simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
    return size;
}

/* Repack the fp32 kernel matrix (M x K, row major) into GEMM panels:
 * 8-row panels first, then 4-row panels, then leftover single rows. */
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = (float*)priv_info->interleave_buffer;
    float* pA_t = (float*)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    /* 8-row panels: column q of rows p..p+7 stored contiguously */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;
        const float* k4 = pA + (p + 4) * K;
        const float* k5 = pA + (p + 5) * K;
        const float* k6 = pA + (p + 6) * K;
        const float* k7 = pA + (p + 7) * K;

        float* ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* 4-row panels for the remaining rows */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;

        float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* leftover single rows */
    remain_outch_start += nn_outch << 2;

    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;
        float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/* int8 variant of conv_hcl_interleave_pack4_fp32: identical panel layout. */
void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info)
{
    int8_t* pA = (int8_t*)priv_info->interleave_buffer;
    int8_t* pA_t = (int8_t*)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    /* 8-row panels */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;
        const int8_t* k4 = pA + (p + 4) * K;
        const int8_t* k5 = pA + (p + 5) * K;
        const int8_t* k6 = pA + (p + 6) * K;
        const int8_t* k7 = pA + (p + 7) * K;

        int8_t* ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* 4-row panels */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;

        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* leftover single rows */
    remain_outch_start += nn_outch << 2;

    for (int p = remain_outch_start; p < M; p++)
    {
        const int8_t* k0 = pA + (p + 0) * K;
        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4)
* 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/* Allocate work buffers (unless externally provided) and interleave the kernel.
 * Returns 0 on success; may hand off entirely to the winograd path for fp32. */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    /* allocate the im2col buffer unless the caller supplied one */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* allocate the packed im2col buffer unless the caller supplied one */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    /* allocate the plain interleave buffer unless the caller supplied one */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* interleave the kernel (uint8 kernels are dequantized to fp32 first) */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    if (priv_info->external_interleave_pack4_mem)
    {
        /* repack the interleaved kernel into pack4 panels for the GEMM */
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);

        /* the plain interleave buffer is no longer needed once repacked */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* no repacking: the pack4 pointer aliases the plain interleave buffer */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }

    return 0;
}

/* Release all internally-allocated buffers; externally-provided memory is kept. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    /* NOTE(review): this branch tests interleave_buffer but frees
     * interleave_buffer_pack4 — with the prerun above, interleave_buffer is
     * already freed/NULL on this path, so the branch looks dead; verify the
     * intent against conv_hcl_prerun before changing it. */
    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem &&
        priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    return 0;
}

/* Per-batch, per-group execution: im2col -> (optional) pack -> type-dispatched GEMM.
 * Returns 0 on success, -1 on unsupported input data type. */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param,
                                 num_thread, cpu_affinity);
    }

    for (int i = 0; i < input_tensor->dims[0]; i++)    // batch size
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);

            int K = filter_tensor->elem_num / filter_tensor->dims[0];
            int N = output_tensor->dims[2] * output_tensor->dims[3];
            void* im2col_buffer = priv_info->im2col_buffer;

            if (priv_info->external_interleave_pack4_mem)
            {
                if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8)
                    input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
                else
                    input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
            }
            else
            {
                /* no packing: GEMM reads the raw im2col buffer */
                priv_info->im2col_buffer_pack4 = im2col_buffer;
            }

            if (type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j,
                           num_thread);
            else if (type == TENGINE_DT_UINT8)
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j,
                            num_thread);
            else if (type == TENGINE_DT_INT8)
                sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j,
                           num_thread);
            else
            {
                printf("Input data type %d not to be supported.\n", input_tensor->data_type);
                return -1;
            }
        }
    }

    return 0;
}

/* Register a caller-owned im2col buffer; prerun will then skip allocating one. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_mem = 1;
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    return 0;
}

/* Register a caller-owned pack4 im2col buffer; prerun will then skip allocating one. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 1;
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    return 0;
}
/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: quanwang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"

#if __AVX__
#include <immintrin.h>
#endif

#ifndef _MSC_VER
#include <sys/time.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Bytes needed for the raw interleaved copy of the kernel. */
static int get_private_mem_size(struct ir_tensor *filter)
{
    if (filter->data_type == TENGINE_DT_UINT8)    // simulator uint8 inference with fp32
        return filter->elem_num * filter->elem_size * 4;
    else
        return filter->elem_num * filter->elem_size;    // caution
}

static void interleave(struct ir_tensor *filter, struct conv_priv_info *priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}

static void interleave_uint8(struct ir_tensor *filter, struct conv_priv_info *priv_info)
{
    /* dequant uint8 weight to fp32 for simulator */
    float *weight_fp32 = (float *)priv_info->interleave_buffer;
    uint8_t *weight_uint8 = (uint8_t *)filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
    }
}

/* im2col for fp32 input: expand one group's image into a
 * (ksize_h*ksize_w*inc) x (outh*outw) column matrix, zero-filling
 * positions that fall outside the padded input. */
void im2col_fp32(float *data_img, float *data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
                 int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        /* [w_low, w_high) is the output-x range whose sampled input column is in bounds */
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float *out = data_col + (c * outh + h) * outw;
            const float *end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                /* starts one stride before the first valid sample; the loop pre-increments */
                float *in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for uint8 input: same layout as im2col_fp32 but dequantizes each
 * sample to fp32 with the input tensor's scale/zero_point (simulator path). */
void im2col_uint8(uint8_t *data_img, float *data_col, struct ir_tensor *input_tensor, struct ir_tensor *output_tensor,
                  struct conv_param *param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;

    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;

    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float *out = data_col + (c * outh + h) * outw;
            const float *end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                uint8_t *in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    float in_fp32 = ((float)in[0] - (float)zero_point) * scale;
                    out[0] = in_fp32;
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for int8 input: raw copy, no dequantization. */
void im2col_int8(int8_t *data_img, int8_t *data_col, struct ir_tensor *input_tensor, struct ir_tensor *output_tensor,
                 struct conv_param *param)
{
    int ksize_h = param->kernel_h;
    int ksize_w =
param->dilation_w);
    }
    else if (input->data_type == TENGINE_DT_UINT8)
    {
        im2col_uint8(input_base, im2col_buf, input, output, param);
    }
    else if (input->data_type == TENGINE_DT_INT8)
    {
        im2col_int8(input_base, im2col_buf, input, output, param);
    }
    else
    {
        printf("Input data type %d not to be supported.\n", input->data_type);
    }
}

/* Pack the fp32 im2col matrix (K x N, row major) into 8-column panels
 * (plus single trailing columns) for sgemm_fp. */
void input_pack4_fp32(int K, int N, float *pB, float *pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;    /* sic: "remain" is misspelled in this identifier */

    //[ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33....]
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;
        const float *img = pB + i;
        float *tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
#if __AVX__
            _mm256_storeu_ps(tmp, _mm256_loadu_ps(img));
#else
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];
#endif    /* // __SSE__ */
            tmp += 8;
            img += N;
        }
    }

    //[ch00, ch01, ch02, ch03....]
    for (int i = remian_size_start; i < N; i++)
    {
        const float *img = pB + i;
        float *tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp += 1;
            img += N;
        }
    }
}

/* Panel-blocked fp32 GEMM: pC (M x N) = pA_t (packed M x K) * pB_t (packed K x N),
 * processing 8 output rows at a time (then 4, then 1) with AVX when available. */
static void sgemm_fp(int M, int N, int K, float *pA_t, float *pB_t, float *pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        float *output0 = pC + (i) * N;
        float *output1 = pC + (i + 1) * N;
        float *output2 = pC + (i + 2) * N;
        float *output3 = pC + (i + 3) * N;
        float *output4 = pC + (i + 4) * N;
        float *output5 = pC + (i + 5) * N;
        float *output6 = pC + (i + 6) * N;
        float *output7 = pC + (i + 7) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float *va = pA_t + (i / 8) * 8 * K;
            float *vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 =
_mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); //sum4 = (a00 - a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); //sum5 = (a00 - a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); //sum6 = (a00 - a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); //sum7 = (a00 - a07) * k70 va += 8; //k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); //sum0 += (a10 - a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); //sum1 += (a10 - a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); //sum2 += (a10 - a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); //sum3 += (a10 - a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); //sum4 += (a10 - a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); //sum5 += (a10 - a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); //sum6 += (a10 
- a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); //sum7 += (a10 - a17) * k71 va += 8; //k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); //sum0 += (a20 - a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); //sum1 += (a20 - a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); //sum2 += (a20 - a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); //sum3 += (a20 - a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); //sum4 += (a20 - a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); //sum5 += (a20 - a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); //sum6 += (a20 - a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); //sum7 += (a20 - a27) * k72 va += 8; //k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); //sum0 += (a30 - a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); //sum1 += (a30 - a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); //sum2 += (a30 - a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); //sum3 += (a30 - a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); //sum4 += (a30 - a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); //sum5 += (a30 - a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); //sum6 += (a30 - a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); //sum7 += (a30 - a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va 
+ 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); //sum4 = (a00 - a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); //sum5 = (a00 - a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); //sum6 = (a00 - a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); //sum7 = (a00 - a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif /* // __AVX__ */ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float *va = pA_t + (i / 8) * 8 * K; float *vb = pB_t 
+ (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); //sum0 += (k00 - k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); //sum1 += (k01 - k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); //sum2 += (k02 - k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); //sum3 += (k03 - k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); //sum0 += (k00 - k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0. 
f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif /* // __AVX__ */ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float *output0 = pC + (i) * N; float *output1 = pC + (i + 1) * N; float *output2 = pC + (i + 2) * N; float *output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float *vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = 
_mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 va += 4; //k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); //sum0 += (a10 - a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); //sum1 += (a10 - a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); //sum2 += (a10 - a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); //sum3 += (a10 - a17) * k31 va += 4; //k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); //sum0 += (a20 - a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); //sum1 += (a20 - a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); //sum2 += (a20 - a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); //sum3 += (a20 - a27) * k32 va += 4; //k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); //sum0 += (a30 - a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); //sum1 += (a30 - a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); //sum2 += (a30 - a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); //sum3 += (a30 - a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 va += 
4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif /* // __AVX__ */ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); //sum0 += (k00 - k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); //sum1 += (k01 - k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); //sum2 += (k02 - k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); //sum3 += (k03 - k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); //sum0 += (k00 - k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0. 
f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif /* // __AVX__ */ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; //output ch0 for (int i = remain_outch_start; i < M; i++) { float *output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float *vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); //sum0 += (a10 - a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); //sum0 += (a20 - a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); //sum0 += (a30 - a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif /* // __AVX__ */ output += 8; } for (; j < N; j++) { float *va = pA_t + 
(i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float *vb = pB_t + (j / 8 + j % 8) * 8 * K;
            int k = 0;

#if __AVX__
            /* vector part of the dot product: 4 floats per step */
            __m128 _sum0 = _mm_set1_ps(0. f);

            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
                va += 4;
                vb += 4;
            }

#ifdef _WIN32
            /* MSVC exposes __m128 lanes via the m128_f32 union member */
            float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
            float sum0 = 0. f;
#endif /* // __AVX__ */
            /* scalar tail for the remaining K % 4 terms */
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }

            output[0] = sum0;
            output++;
        }
    }
}

/* Pack the int8 input matrix B (K rows x N cols, row stride N) into the
 * blocked buffer pB_t consumed by sgemm_i8: full groups of 8 columns are
 * stored column-interleaved (8 consecutive values per K step); leftover
 * columns are stored one value per K step.
 * NOTE(review): num_thread is unused in this scalar implementation. */
void input_pack4_int8(int K, int N, int8_t * pB, int8_t * pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3; /* NOTE(review): "remian" typo kept — identifier is used consistently */

    //[ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33....]
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;
        const int8_t *img = pB + i;
        int8_t *tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];

            tmp += 8;
            img += N;
        }
    }

    //[ch00, ch01, ch02, ch03....]
    /* leftover columns (N % 8): one int8 per K step */
    for (int i = remian_size_start; i < N; i++)
    {
        const int8_t *img = pB + i;
        int8_t *tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];

            tmp += 1;
            img += N;
        }
    }
}

/* Blocked int8 GEMM: C(MxN) = A(MxK) * B(KxN) with int32 accumulation.
 * A is pre-interleaved in row groups of 8/4/1, B is packed by
 * input_pack4_int8.  The AVX2 paths below are compiled out ("#if 0"),
 * so only the scalar fallbacks execute.
 * NOTE(review): num_thread is unused in this scalar implementation. */
static void sgemm_i8(int M, int N, int K, int8_t * pA_t, int8_t * pB_t, int32_t * pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

    /* process 8 output rows at a time */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        int32_t *output0 = pC + (i) * N;
        int32_t *output1 = pC + (i + 1) * N;
        int32_t *output2 = pC + (i + 2) * N;
        int32_t *output3 = pC + (i + 3) * N;
        int32_t *output4 = pC + (i + 4) * N;
        int32_t *output5 = pC + (i + 5) * N;
        int32_t *output6 = pC + (i + 6) * N;
        int32_t *output7 = pC + (i + 7) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            int8_t *va = pA_t + (i / 8) * 8 * K;
            int8_t *vb = pB_t + (j / 8) * 8 * K;
#if 0 //__AVX__
            /* NOTE(review): dead code — disabled by "#if 0" */
            __m256i _sum0 = _mm256_set1_epi32(0);
            __m256i _sum1 = _mm256_set1_epi32(0);
            __m256i _sum2 = _mm256_set1_epi32(0);
            __m256i _sum3 = _mm256_set1_epi32(0);
            __m256i _sum4 = _mm256_set1_epi32(0);
            __m256i _sum5 = _mm256_set1_epi32(0);
            __m256i _sum6 = _mm256_set1_epi32(0);
            __m256i _sum7 = _mm256_set1_epi32(0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                //k0
                __m256i _va0 = _mm256_set1_epi32(*va);
                __m256i _va1 = _mm256_set1_epi32(*(va + 1));
                __m256i _va2 = _mm256_set1_epi32(*(va + 2));
                __m256i _va3 = _mm256_set1_epi32(*(va + 3));
                __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb));
                __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8)));
                __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16)));
                __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24)));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);

                _va0 = _mm256_set1_epi32(*(va + 4));
_va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; //k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; //k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; //k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i *) output0, _sum0); _mm256_storeu_si256((__m256i *) output1, _sum1); 
_mm256_storeu_si256((__m256i *) output2, _sum2); _mm256_storeu_si256((__m256i *) output3, _sum3); _mm256_storeu_si256((__m256i *) output4, _sum4); _mm256_storeu_si256((__m256i *) output5, _sum5); _mm256_storeu_si256((__m256i *) output6, _sum6); _mm256_storeu_si256((__m256i *) output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); 
_sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i *) output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t *output0 = pC + (i) * N; int32_t *output1 = pC + (i + 1) * N; int32_t *output2 = pC + (i + 2) * N; int32_t *output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t *vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = 
_mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { //k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; //k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; //k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; //k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i *) output0, _sum0); _mm256_storeu_si256((__m256i *) output1, _sum1); _mm256_storeu_si256((__m256i *) output2, _sum2); _mm256_storeu_si256((__m256i *) output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = 
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 16; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = { 0 }; _mm256_storeu_si256((__m256i *) output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; //output ch0 for (int i = remain_outch_start; i < M; i++) { int32_t *output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t *vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = 
_mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i *) output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0. 
f;
            /* scalar dot product over the full K dimension */
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }
            output[0] = sum0;
            output++;
        }
    }
}

/* fp32 convolution for one batch image `n` and one filter group `group`:
 * runs the packed SGEMM (sgemm_fp) on the pre-interleaved kernel and the
 * pre-packed im2col buffer, then adds bias and applies the activation in
 * place on the output tensor.
 * NOTE(review): activation == 0 applies relu; any activation > 0 applies a
 * clamp to [0, 6] (relu6) — confirm that all positive values mean relu6. */
static void sgemm_fp32(struct ir_tensor *input, struct ir_tensor *filter, struct ir_tensor *bias,
                       struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param,
                       int n, int group, int num_thread)
{
    /* per-group kernel size and output channel count */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* group-relative views into the pre-packed buffers and the output */
    float *interleave_fp32 = (float *)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float *im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    float *output_fp32 = (float *)output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    float *bias_fp32 = NULL;
    if (bias)
        bias_fp32 = (float *)bias->data + outchan_g * group;

    float *filter_sgemm = interleave_fp32;
    float *input_sgemm_pack4 = im2col_pack4_fp32;
    float *output_sgemm = output_fp32;

    /* C(outchan_g x out_h*out_w) = filter * im2col, written directly to the output tensor */
    sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    //process bias
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_fp32[output_off] += bias_fp32[i];
            }
        }
    }

    //process activation relu
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    //process activation relu6
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }
}

static void sgemm_uint8(struct ir_tensor *input, struct ir_tensor
*filter, struct ir_tensor *bias,
                        struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param,
                        int n, int group, int num_thread)
{
    /* uint8 convolution simulated in fp32: the packed buffers hold fp32 data,
     * the fp32 SGEMM runs into a scratch buffer, and the result is
     * re-quantized to uint8 with the output tensor's scale/zero_point. */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    float *interleave_fp32 = (float *)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float *im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    uint8_t *output_uint8 = (uint8_t *) output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int *bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = (int *)bias->data + outchan_g * group;
        /* int32 bias is dequantized with input_scale * weight_scale */
        bias_scale = input->scale * filter->scale;
    }

    float *filter_sgemm = interleave_fp32;
    float *input_sgemm_pack4 = im2col_pack4_fp32;
    /* fp32 scratch output; NOTE(review): sys_malloc result is not checked — confirm allocator policy */
    float *output_sgemm = (float *)sys_malloc(outchan_g * out_h * out_w * sizeof(float));

    sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    /* process bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_sgemm[output_off] += (float)bias_int32[i] * bias_scale;
            }
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to uint8 */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            /* round-to-nearest, shift by zero_point, clamp to [0, 255] */
            int udata = (int)(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }

    sys_free(output_sgemm);
}

/* int8 convolution: runs the int8 SGEMM into an int32 scratch buffer,
 * dequantizes to fp32 (per-output-channel weight scales), applies bias and
 * activation, then re-quantizes to int8 with the output scale. */
static void sgemm_int8(struct ir_tensor *input, struct ir_tensor *filter, struct ir_tensor *bias,
                       struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param,
                       int n, int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    int8_t *interleave_int8 = (int8_t *) priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    int8_t *im2col_pack4_int8 = priv_info->im2col_buffer_pack4;
    int8_t *output_int8 = (int8_t *) output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int32_t *bias_int32 = NULL;
    if (bias)
        bias_int32 = (int *)bias->data + outchan_g * group;

    float input_scale = input->scale;
    /* per-output-channel kernel scales */
    float *kernel_scales = filter->scale_list;
    float output_scale = output->scale;

    int8_t *filter_sgemm = interleave_int8;
    int8_t *input_sgemm_pack4 = im2col_pack4_int8;
    /* NOTE(review): sys_malloc results are not checked — confirm allocator policy */
    int32_t *output_sgemm_int32 = (int32_t *) sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t));
    float *output_sgemm_fp32 = (float *)sys_malloc(outchan_g * out_h * out_w * sizeof(float));

    sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread);

    /* process bias and dequant output from int32 to fp32 */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            if (bias)
                output_sgemm_fp32[output_off] =
                    (float)(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
output_sgemm_fp32[output_off] = (float)output_sgemm_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8 */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            /* round-to-nearest, symmetric clamp to [-127, 127] */
            int32_t data_i32 = (int32_t) (round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t) data_i32;
        }
    }

    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}

/* check whether the conv needs to be run using winograd */
static int winograd_support(struct conv_param *param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int input_chan = param->input_channel;
    int output_chan = param->output_channel;
    int group = param->group;

    /* winograd pays off only for larger spatial sizes */
    if (in_h <= 10 && in_w <= 10)
        return 0;

    /* only ungrouped 3x3 stride-1 non-dilated convs with >=16 channels
     * (output channels a multiple of 16) take the winograd path */
    if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1
        || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16)
        return 0;

    return 1;
}

/* Size in bytes of the shared im2col buffer: one fp32/elem_size entry per
 * (output pixel, kernel element) pair for a single group. */
int conv_hcl_get_shared_mem_size(struct ir_tensor *input, struct ir_tensor *output, struct conv_param *param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size = input->elem_size;

    //simulator uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return elem_size * output_xy * kernel_size;
}

/* Size in bytes of the packed (pack-4/8) im2col buffer: full panels of 8
 * columns plus one 8-wide slot per leftover column. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor *filter, struct ir_tensor *output, struct conv_param *param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;

    //simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return (8 * K * (N / 8 + N % 8)) * elem_size;
}

/* Size in bytes of the interleaved kernel buffer: rows are grouped 8/4/1,
 * each group padded to an 8-wide slot. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor *filter)
{
    int elem_size = filter->elem_size;

    //simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
    return size;
}

/* Interleave the fp32 kernel matrix (M rows x K cols) from
 * priv_info->interleave_buffer into the packed layout expected by sgemm_fp:
 * groups of 8 rows stored row-interleaved, then groups of 4, then singles. */
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info *priv_info)
{
    float *pA = (float *)priv_info->interleave_buffer;
    float *pA_t = (float *)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    /* full groups of 8 rows */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const float *k0 = pA + (p + 0) * K;
        const float *k1 = pA + (p + 1) * K;
        const float *k2 = pA + (p + 2) * K;
        const float *k3 = pA + (p + 3) * K;
        const float *k4 = pA + (p + 4) * K;
        const float *k5 = pA + (p + 5) * K;
        const float *k6 = pA + (p + 6) * K;
        const float *k7 = pA + (p + 7) * K;

        float *ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* groups of 4 rows among the remainder */
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float *k0 = pA
+ (p + 0) * K; const float *k1 = pA + (p + 1) * K; const float *k2 = pA + (p + 2) * K; const float *k3 = pA + (p + 3) * K; float *ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const float *k0 = pA + (p + 0) * K; float *ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info *priv_info) { int8_t *pA = (int8_t *) priv_info->interleave_buffer; int8_t *pA_t = (int8_t *) priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const int8_t *k0 = pA + (p + 0) * K; const int8_t *k1 = pA + (p + 1) * K; const int8_t *k2 = pA + (p + 2) * K; const int8_t *k3 = pA + (p + 3) * K; const int8_t *k4 = pA + (p + 4) * K; const int8_t *k5 = pA + (p + 5) * K; const int8_t *k6 = pA + (p + 6) * K; const int8_t *k7 = pA + (p + 7) * K; int8_t *ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const int8_t *k0 = pA + (p + 0) * K; const int8_t *k1 = pA + (p + 1) * K; const int8_t *k2 = pA + (p + 2) * K; const int8_t *k3 = pA + (p + 3) * K; int8_t *ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = 
remain_outch_start; p < M; p++) { const int8_t *k0 = pA + (p + 0) * K; int8_t *ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct ir_tensor *input_tensor, struct ir_tensor *filter_tensor, struct ir_tensor *output_tensor, struct conv_priv_info *priv_info, struct conv_param *param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check winograd implement, only for conv3x3s1 */ if (input_tensor->data_type == TENGINE_DT_FP32) { priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void *mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if (!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void *mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void *mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (input_tensor->data_type == TENGINE_DT_UINT8) interleave_uint8(filter_tensor, priv_info); else interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void *mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8) 
conv_hcl_interleave_pack4_fp32(M, K, priv_info); else conv_hcl_interleave_pack4_int8(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } else { priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer; priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size; } return 0; } int conv_hcl_postrun(struct conv_priv_info *priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct ir_tensor *input_tensor, struct ir_tensor *filter_tensor, struct ir_tensor *bias_tensor, struct ir_tensor *output_tensor, struct conv_priv_info *priv_info, struct conv_param *param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) //batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * 
output_tensor->dims[3]; void *im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { printf("Input data type %d not to be supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info *priv_info, void *mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info *priv_info, void *mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
/* * Copyright (c) 2020, OPEN AI LAB Author: quanwang@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include "conv_kernel_x86.h" #include "wino_conv_kernel_x86.h" #if __AVX__ #include <immintrin.h> #endif #ifndef _MSC_VER #include <sys/time.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) #endif static int get_private_mem_size(struct ir_tensor *filter) { if (filter->data_type == TENGINE_DT_UINT8) //simulator uint8 inference with fp32 return filter->elem_num * filter->elem_size * 4; else return filter->elem_num * filter->elem_size; //caution } static void interleave(struct ir_tensor *filter, struct conv_priv_info *priv_info) { /* simply copy the data */ memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size); } static void interleave_uint8(struct ir_tensor *filter, struct conv_priv_info *priv_info) { /* dequant uint8 weight to fp32 for simulator */ float *weight_fp32 = (float *)priv_info->interleave_buffer; uint8_t *weight_uint8 = (uint8_t *) filter->data; float scale = filter->scale; int zero_point = filter->zero_point; for (int i = 0; i < filter->elem_num; i++) { weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale; } } void im2col_fp32(float *data_img, float *data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw) { const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float *out = data_col + (c * outh + h) * outw; const float *end = out + w_high; if (im_row >= 0 && im_row < inh) { float *in = 
data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; *(out++) = *in; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_uint8(uint8_t * data_img, float *data_col, struct ir_tensor *input_tensor, struct ir_tensor *output_tensor, struct conv_param *param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; float scale = input_tensor->scale; int zero_point = input_tensor->zero_point; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float *out = data_col + (c * outh + h) * outw; const float *end = out + w_high; if (im_row >= 0 && im_row < inh) { uint8_t *in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; float in_fp32 = ((float)in[0] - (float)zero_point) * scale; out[0] = in_fp32; out++; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_int8(int8_t * data_img, int8_t * data_col, struct ir_tensor *input_tensor, struct ir_tensor *output_tensor, struct conv_param *param) { int ksize_h = param->kernel_h; int ksize_w = 
param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; int8_t *out = data_col + (c * outh + h) * outw; const int8_t *end = out + w_high; if (im_row >= 0 && im_row < inh) { int8_t *in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(int8_t)); out += w_low; while (out < end) { in += sw; out[0] = in[0]; out++; } memset(out, 0, (outw - w_high) * sizeof(int8_t)); } else { memset(out, 0, outw * sizeof(int8_t)); } } } } static void im2col_ir(struct ir_tensor *input, struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void *input_base = (void *)((uint8_t *) input->data + (n * image_size + group * group_size) * input->elem_size); void *im2col_buf = (void *)priv_info->im2col_buffer; if (input->data_type == TENGINE_DT_FP32) { im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, 
param->dilation_w); } else if (input->data_type == TENGINE_DT_UINT8) { im2col_uint8(input_base, im2col_buf, input, output, param); } else if (input->data_type == TENGINE_DT_INT8) { im2col_int8(input_base, im2col_buf, input, output, param); } else { printf("Input data type %d not to be supported.\n", input->data_type); } } void input_pack4_fp32(int K, int N, float *pB, float *pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; //[ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float *img = pB + i; float *tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { #if __AVX__ _mm256_storeu_ps(tmp, _mm256_loadu_ps(img)); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; #endif /* // __SSE__ */ tmp += 8; img += N; } } //[ch00, ch01, ch02, ch03....] 
#pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float *img = pB + i; float *tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_fp(int M, int N, int K, float *pA_t, float *pB_t, float *pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float *output0 = pC + (i) * N; float *output1 = pC + (i + 1) * N; float *output2 = pC + (i + 2) * N; float *output3 = pC + (i + 3) * N; float *output4 = pC + (i + 4) * N; float *output5 = pC + (i + 5) * N; float *output6 = pC + (i + 6) * N; float *output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { float *va = pA_t + (i / 8) * 8 * K; float *vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); __m256 _sum4 = _mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = 
_mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); //sum4 = (a00 - a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); //sum5 = (a00 - a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); //sum6 = (a00 - a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); //sum7 = (a00 - a07) * k70 va += 8; //k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); //sum0 += (a10 - a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); //sum1 += (a10 - a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); //sum2 += (a10 - a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); //sum3 += (a10 - a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); //sum4 += (a10 - a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); //sum5 += (a10 - a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); //sum6 += (a10 - a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); //sum7 += (a10 - a17) * k71 va += 8; //k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); //sum0 += (a20 - a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); //sum1 += (a20 - a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); //sum2 += (a20 - a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); //sum3 += (a20 - a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); //sum4 += (a20 - a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); //sum5 += (a20 - a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); //sum6 += (a20 - a27) * k62 _sum7 = 
_mm256_fmadd_ps(_vb2, _va3, _sum7); //sum7 += (a20 - a27) * k72 va += 8; //k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); //sum0 += (a30 - a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); //sum1 += (a30 - a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); //sum2 += (a30 - a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); //sum3 += (a30 - a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); //sum4 += (a30 - a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); //sum5 += (a30 - a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); //sum6 += (a30 - a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); //sum7 += (a30 - a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); //sum4 = (a00 - a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); //sum5 = (a00 - a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); //sum6 = (a00 - a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); //sum7 = (a00 - a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); 
_mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif /* // __AVX__ */ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float *va = pA_t + (i / 8) * 8 * K; float *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); //sum0 += (k00 - k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); //sum1 += (k01 - k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); //sum2 += (k02 - k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); //sum3 += (k03 - k73) * a30 va 
+= 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); //sum0 += (k00 - k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0. f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif /* // __AVX__ */ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float *output0 = pC + (i) * N; float *output1 = pC + (i + 1) * N; float *output2 = pC + (i + 2) * N; float *output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float *vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 
= _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 va += 4; //k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); //sum0 += (a10 - a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); //sum1 += (a10 - a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); //sum2 += (a10 - a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); //sum3 += (a10 - a17) * k31 va += 4; //k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); //sum0 += (a20 - a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); //sum1 += (a20 - a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); //sum2 += (a20 - a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); //sum3 += (a20 - a27) * k32 va += 4; //k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); //sum0 += (a30 - a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); //sum1 += (a30 - a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); //sum2 += (a30 - a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); //sum3 += (a30 - a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = 
_mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); //sum1 = (a00 - a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); //sum2 = (a00 - a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); //sum3 = (a00 - a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif /* // __AVX__ */ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); //sum0 += (k00 - k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); //sum1 += (k01 - k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); //sum2 += (k02 - k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); //sum3 += (k03 - k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, 
_sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); //sum0 += (k00 - k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0. f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif /* // __AVX__ */ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; //output ch0 for (int i = remain_outch_start; i < M; i++) { float *output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float *vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); //sum0 += (a10 - a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); //sum0 += (a20 - a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); //sum0 += (a30 - a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { //k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); //sum0 = (a00 - a07) * k00 va += 1; vb += 8; } 
_mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif /* // __AVX__ */ output += 8; } for (; j < N; j++) { float *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float *vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0. f); for (; k + 3 < K; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } #ifdef _WIN32 float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3]; #else float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #endif #else float sum0 = 0. f; #endif /* // __AVX__ */ for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } void input_pack4_int8(int K, int N, int8_t * pB, int8_t * pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; //[ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t *img = pB + i; int8_t *tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } //[ch00, ch01, ch02, ch03....] 
#pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t *img = pB + i; int8_t *tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t * pA_t, int8_t * pB_t, int32_t * pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t *output0 = pC + (i) * N; int32_t *output1 = pC + (i + 1) * N; int32_t *output2 = pC + (i + 2) * N; int32_t *output3 = pC + (i + 3) * N; int32_t *output4 = pC + (i + 4) * N; int32_t *output5 = pC + (i + 5) * N; int32_t *output6 = pC + (i + 6) * N; int32_t *output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t *va = pA_t + (i / 8) * 8 * K; int8_t *vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { //k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 
= _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; //k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; //k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; //k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i *) output0, 
_sum0); _mm256_storeu_si256((__m256i *) output1, _sum1); _mm256_storeu_si256((__m256i *) output2, _sum2); _mm256_storeu_si256((__m256i *) output3, _sum3); _mm256_storeu_si256((__m256i *) output4, _sum4); _mm256_storeu_si256((__m256i *) output5, _sum5); _mm256_storeu_si256((__m256i *) output6, _sum6); _mm256_storeu_si256((__m256i *) output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 24))); _sum0 = 
_mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i *) output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t *output0 = pC + (i) * N; int32_t *output1 = pC + (i + 1) * N; int32_t *output2 = pC + (i + 2) * N; int32_t *output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t *vb = 
pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { //k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; //k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; //k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; //k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i *) output0, _sum0); _mm256_storeu_si256((__m256i *) output1, _sum1); _mm256_storeu_si256((__m256i *) output2, _sum2); _mm256_storeu_si256((__m256i *) output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = 
_mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 16; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = { 0 }; _mm256_storeu_si256((__m256i *) output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; //output ch0 for (int i = remain_outch_start; i < M; i++) { int32_t *output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t *vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = 
_mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i *) output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t *va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t *vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0. 
f; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct ir_tensor *input, struct ir_tensor *filter, struct ir_tensor *bias, struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float *interleave_fp32 = (float *)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float *im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float *output_fp32 = (float *)output->data + n * out_image_size + outchan_g * group * out_h * out_w; float *bias_fp32 = NULL; if (bias) bias_fp32 = (float *)bias->data + outchan_g * group; float *filter_sgemm = interleave_fp32; float *input_sgemm_pack4 = im2col_pack4_fp32; float *output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); //process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } //process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } //process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct ir_tensor *input, struct ir_tensor 
*filter, struct ir_tensor *bias, struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float *interleave_fp32 = (float *)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float *im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t *output_uint8 = (uint8_t *) output->data + n * out_image_size + outchan_g * group * out_h * out_w; int *bias_int32 = NULL; float bias_scale = 0. f; if (bias) { bias_int32 = (int *)bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float *filter_sgemm = interleave_fp32; float *input_sgemm_pack4 = im2col_pack4_fp32; float *output_sgemm = (float *)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float)bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { 
for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = (int)(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void sgemm_int8(struct ir_tensor *input, struct ir_tensor *filter, struct ir_tensor *bias, struct ir_tensor *output, struct conv_priv_info *priv_info, struct conv_param *param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t *interleave_int8 = (int8_t *) priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t *im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t *output_int8 = (int8_t *) output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t *bias_int32 = NULL; if (bias) bias_int32 = (int *)bias->data + outchan_g * group; float input_scale = input->scale; float *kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t *filter_sgemm = interleave_int8; int8_t *input_sgemm_pack4 = im2col_pack4_int8; int32_t *output_sgemm_int32 = (int32_t *) sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t)); float *output_sgemm_fp32 = (float *)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) output_sgemm_fp32[output_off] = (float)(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale 
* kernel_scales[i]; else output_sgemm_fp32[output_off] = (float)output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; if (output_sgemm_fp32[output_off] > 6) output_sgemm_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ for (int i = 0; i < outchan_g; i++) { #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int32_t data_i32 = (int32_t) (round(output_sgemm_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t) data_i32; } } sys_free(output_sgemm_int32); sys_free(output_sgemm_fp32); } /* check the conv wheather need to be using winograd */ static int winograd_support(struct conv_param *param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16) return 0; return 1; } int 
conv_hcl_get_shared_mem_size(struct ir_tensor *input, struct ir_tensor *output, struct conv_param *param)
{
    /* bytes needed for the im2col buffer of one group: output_xy rows of
     * kernel_size elements */
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size = input->elem_size;

    //simulator uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return elem_size * output_xy * kernel_size;
}

int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor *filter, struct ir_tensor *output, struct conv_param *param)
{
    /* bytes needed for the pack4 im2col buffer: 8*K per 8-column block plus
     * one 8*K block for every remainder column (N % 8) -- matches the
     * (j/8 + j%8)*8*K indexing used by the sgemm kernels */
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;

    //simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return (8 * K * (N / 8 + N % 8)) * elem_size;
}

int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor *filter)
{
    /* bytes needed for the packed kernel buffer: rows grouped 8/4/1, matching
     * conv_hcl_interleave_pack4_* below */
    int elem_size = filter->elem_size;

    //simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
    return size;
}

/* re-pack the interleaved fp32 kernel matrix into 8-, 4- and 1-row blocks so
 * the sgemm kernels can stream it sequentially */
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info *priv_info)
{
    float *pA = (float *)priv_info->interleave_buffer;
    float *pA_t = (float *)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    /* 8-row blocks */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const float *k0 = pA + (p + 0) * K;
        const float *k1 = pA + (p + 1) * K;
        const float *k2 = pA + (p + 2) * K;
        const float *k3 = pA + (p + 3) * K;
        const float *k4 = pA + (p + 4) * K;
        const float *k5 = pA + (p + 5) * K;
        const float *k6 = pA + (p + 6) * K;
        const float *k7 = pA + (p + 7) * K;

        float *ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 +=
1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* 4-row blocks */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float *k0 = pA + (p + 0) * K;
        const float *k1 = pA + (p + 1) * K;
        const float *k2 = pA + (p + 2) * K;
        const float *k3 = pA + (p + 3) * K;

        float *ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* leftover single rows */
    remain_outch_start += nn_outch << 2;

    for (int p = remain_outch_start; p < M; p++)
    {
        const float *k0 = pA + (p + 0) * K;
        float *ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/* re-pack the interleaved int8 kernel matrix into 8-, 4- and 1-row blocks so
 * the sgemm kernels can stream it sequentially (int8 twin of the above) */
void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info *priv_info)
{
    int8_t *pA = (int8_t *) priv_info->interleave_buffer;
    int8_t *pA_t = (int8_t *) priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    /* 8-row blocks */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const int8_t *k0 = pA + (p + 0) * K;
        const int8_t *k1 = pA + (p + 1) * K;
        const int8_t *k2 = pA + (p + 2) * K;
        const int8_t *k3 = pA + (p + 3) * K;
        const int8_t *k4 = pA + (p + 4) * K;
        const int8_t *k5 = pA + (p + 5) * K;
        const int8_t *k6 = pA + (p + 6) * K;
        const int8_t *k7 = pA + (p + 7) * K;

        int8_t *ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* 4-row blocks */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const int8_t *k0 = pA + (p + 0) * K;
        const int8_t *k1 = pA + (p + 1) * K;
        const int8_t *k2 = pA + (p + 2) * K;
        const int8_t *k3 = pA + (p + 3) * K;

        int8_t *ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0;
q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* leftover single rows */
    remain_outch_start += nn_outch << 2;

    for (int p = remain_outch_start; p < M; p++)
    {
        const int8_t *k0 = pA + (p + 0) * K;
        int8_t *ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/* one-time setup: allocate scratch buffers (unless caller-supplied) and
 * pre-pack the kernel; eligible fp32 conv3x3s1 is routed to winograd */
int conv_hcl_prerun(struct ir_tensor *input_tensor, struct ir_tensor *filter_tensor, struct ir_tensor *output_tensor,
                    struct conv_priv_info *priv_info, struct conv_param *param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    /* im2col buffer (skipped when the caller registered an external one) */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void *mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* pack4 im2col buffer (skipped when the caller registered an external one) */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void *mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    /* flat interleaved kernel buffer */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void *mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* uint8 kernels are dequantized to fp32 while interleaving */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    /* NOTE(review): unlike the buffers above, this branch allocates when the
     * "external" flag IS set -- presumably external_interleave_pack4_mem here
     * means "use the pack4 layout"; confirm against the callers that set it. */
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void *mem = sys_malloc(mem_size);
priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        /* uint8 kernels were dequantized to fp32 at interleave time, so they
         * take the fp32 packer too */
        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);

        /* the flat interleave buffer is no longer needed once re-packed */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* no re-packing: alias the flat interleave buffer as the pack4 buffer */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }

    return 0;
}

/* release every buffer conv_hcl_prerun allocated; caller-owned (external)
 * buffers are left alone. Freed pointers are NULLed to avoid double frees. */
int conv_hcl_postrun(struct conv_priv_info *priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem
        && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    return 0;
}

/* per-inference entry point: im2col + pack + sgemm for every batch image and
 * every group; winograd-eligible convs are dispatched early */
int conv_hcl_run(struct ir_tensor *input_tensor, struct ir_tensor *filter_tensor, struct ir_tensor *bias_tensor,
                 struct ir_tensor *output_tensor, struct conv_priv_info *priv_info, struct conv_param *param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param,
                                 num_thread, cpu_affinity);
    }

    for (int i = 0; i < input_tensor->dims[0]; i++) //batch size
    {
        for
(int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; void *im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { printf("Input data type %d not to be supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info *priv_info, void *mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info *priv_info, void *mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
/* ==== lu.c — NAS Parallel Benchmarks LU solver (tool-flattened source follows) ==== */
/* --- Minimal stdio/libm declarations, expanded inline (in place of the
 *     system headers) by the source-to-source tool that produced this file.
 *     The /*[n]*-style phase markers of that tool have been dropped. --- */
typedef long long __int64_t;
typedef __int64_t __darwin_off_t;
typedef __darwin_off_t fpos_t;

struct __sbuf
{
    unsigned char *_base;
    int _size;
};

struct __sFILEX;

/* Darwin-style FILE layout, reproduced verbatim from the flattener. */
struct __sFILE
{
    unsigned char *_p;
    int _r;
    int _w;
    short _flags;
    short _file;
    struct __sbuf _bf;
    int _lbfsize;
    void *_cookie;
    int (*_close)(void *);
    int (*_read)(void *, char *, int);
    fpos_t (*_seek)(void *, fpos_t, int);
    int (*_write)(void *, const char *, int);
    struct __sbuf _ub;
    struct __sFILEX *_extra;
    int _ur;
    unsigned char _ubuf[3];
    unsigned char _nbuf[1];
    struct __sbuf _lb;
    int _blksize;
    fpos_t _offset;
};

typedef struct __sFILE FILE;

int fclose(FILE *);
int fgetc(FILE *);
FILE *fopen(const char *restrict __filename, const char *restrict __mode);
int fscanf(FILE *restrict, const char *restrict, ...);
int printf(const char *restrict, ...);
void exit(int);
extern double fabs(double);
extern double sqrt(double);
extern int omp_get_num_threads(void);

typedef int boolean;

/* NPB timing / result-reporting helpers (defined elsewhere in the suite).
 * Note "char class": `class` is a valid identifier in C. */
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand);

/* --- LU benchmark global state. Grid is 12^3 (class-S-like size baked in by
 *     the flattener; the odd "12 / 2 * 2 + 1" extents come from macro
 *     expansion). --- */

/* Grid extents actually used (nx,ny,nz) and problem extents (nx0,ny0,nz0). */
static int nx;
static int ny;
static int nz;
static int nx0;
static int ny0;
static int nz0;
/* Interior index ranges in x (ist..iend) and y (jst..jend). */
static int ist;
static int iend;
static int jst;
static int jend;
/* Index bounds used by the surface-integral phase (pintgr). */
static int ii1;
static int ii2;
static int ji1;
static int ji2;
static int ki1;
static int ki2;
/* Inverse grid spacings. */
static double dxi;
static double deta;
static double dzeta;
/* Difference-scheme coefficients per direction. */
static double tx1;
static double tx2;
static double tx3;
static double ty1;
static double ty2;
static double ty3;
static double tz1;
static double tz2;
static double tz3;
/* Artificial-dissipation coefficients per direction and equation. */
static double dx1;
static double dx2;
static double dx3;
static double dx4;
static double dx5;
static double dy1;
static double dy2;
static double dy3;
static double dy4;
static double dy5;
static double dz1;
static double dz2;
static double dz3;
static double dz4;
static double dz5;
static double dssp;
/* Field arrays: solution u, residual/RHS rsd, forcing frct, scratch flux. */
static double u[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double rsd[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double frct[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double flux[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/* Iteration control and SSOR parameters. */
static int ipr;
static int inorm;
static int itmax;
static double dt;
static double omega;
static double tolrsd[5];
static double rsdnm[5];
static double errnm[5];
static double frc;
/* 5x5 block-Jacobian matrices for the lower/upper triangular sweeps. */
static double a[12][12][5][5];
static double b[12][12][5][5];
static double c[12][12][5][5];
static double d[12][12][5][5];
/* Exact-solution polynomial coefficients. */
static double ce[5][13];
static double maxtime;
/* Per-row pipeline flags used to synchronize the wavefront sweeps. */
static boolean flag[12 / 2 * 2 + 1];

/* Lower-triangular (blts) and upper-triangular (buts) block solves.
 * NOTE: the buts prototype is split across the original source lines and is
 * completed on the following (unedited) line. */
static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0);
static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5], double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int
jst , int jend , int nx0 , int ny0); /*[]*/ static void domain(void ); /*[]*/ static void erhs(void ); /*[]*/ static void error(void ); /*[]*/ static void exact(int i, int j , int k , double u000ijk[5]); /*[]*/ static void jacld(int k); /*[]*/ static void jacu(int k); /*[]*/ static void l2norm(int nx0, int ny0 , int nz0 , int ist , int iend , int jst , int jend , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double sum[5]); /*[]*/ static void pintgr(void ); /*[]*/ static void read_input(void ); /*[]*/ static void rhs(void ); /*[]*/ static void setbv(void ); /*[]*/ static void setcoeff(void ); /*[]*/ static void setiv(void ); /*[]*/ static void ssor(void ); /*[]*/ static void verify(double xcr[5], double xce[5] , double xci , char *class , boolean *verified); /*[]*/ /*[]*/ /*[]*/ int main(int argc, char **argv) { /*[]*/ /*[]*/ char class; /*[]*/ boolean verified; /*[]*/ double mflops; /*[]*/ int nthreads = 1; /*[]*/ read_input(); /*[]*/ /*[]*/ domain(); /*[]*/ /*[]*/ setcoeff(); /*[]*/ /*[1]*/ #pragma omp parallel { /*[1]*/ /*[1]*/ int i; /*[1]*/ int j; /*[1]*/ int k; /*[1]*/ int iglob; /*[1]*/ int jglob; /*[1]*/ #pragma omp for nowait /*[1]*/ /*[1]*/ /*[1]*/ for (i = 0; i < nx; i++) { /*[1]*/ /*[1]*/ iglob = i; /*[1]*/ /*[1]*/ /*[1]*/ /*[1]*/ for (j = 0; j < ny; j++) { /*[1]*/ /*[1]*/ jglob = j; /*[1]*/ double *_imopVarPre239; /*[1]*/ _imopVarPre239 = &u[i][j][0][0]; /*[1]*/ exact(iglob, jglob, 0, _imopVarPre239); /*[1]*/ /*[1]*/ double *_imopVarPre242; /*[1]*/ int _imopVarPre243; /*[1]*/ _imopVarPre242 = &u[i][j][nz - 1][0]; /*[1]*/ _imopVarPre243 = nz - 1; /*[1]*/ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /*[1]*/ } } /*[1]*/ // #pragma omp dummyFlush BARRIER_START /*[1]*/ #pragma omp barrier /*[2]*/ #pragma omp for nowait /*[2]*/ /*[2]*/ /*[2]*/ for (i = 0; i < nx; i++) { /*[2]*/ /*[2]*/ iglob = i; /*[2]*/ /*[2]*/ /*[2]*/ /*[2]*/ for (k = 0; k < nz; k++) { /*[2]*/ /*[2]*/ double *_imopVarPre245; /*[2]*/ _imopVarPre245 = &u[i][0][k][0]; /*[2]*/ 
exact(iglob, 0, k, _imopVarPre245); /*[2]*/ } } /*[2]*/ // #pragma omp dummyFlush BARRIER_START /*[2]*/ #pragma omp barrier /*[3]*/ #pragma omp for nowait /*[3]*/ /*[3]*/ /*[3]*/ for (i = 0; i < nx; i++) { /*[3]*/ /*[3]*/ iglob = i; /*[3]*/ /*[3]*/ /*[3]*/ /*[3]*/ for (k = 0; k < nz; k++) { /*[3]*/ /*[3]*/ double *_imopVarPre248; /*[3]*/ int _imopVarPre249; /*[3]*/ _imopVarPre248 = &u[i][ny - 1][k][0]; /*[3]*/ _imopVarPre249 = ny0 - 1; /*[3]*/ exact(iglob, _imopVarPre249, k, _imopVarPre248); /*[3]*/ } } /*[3]*/ // #pragma omp dummyFlush BARRIER_START /*[3]*/ #pragma omp barrier /*[4]*/ #pragma omp for nowait /*[4]*/ /*[4]*/ /*[4]*/ for (j = 0; j < ny; j++) { /*[4]*/ /*[4]*/ jglob = j; /*[4]*/ /*[4]*/ /*[4]*/ /*[4]*/ for (k = 0; k < nz; k++) { /*[4]*/ /*[4]*/ double *_imopVarPre251; /*[4]*/ _imopVarPre251 = &u[0][j][k][0]; /*[4]*/ exact(0, jglob, k, _imopVarPre251); /*[4]*/ } } /*[4]*/ // #pragma omp dummyFlush BARRIER_START /*[4]*/ #pragma omp barrier /*[5]*/ #pragma omp for nowait /*[5]*/ /*[5]*/ /*[5]*/ for (j = 0; j < ny; j++) { /*[5]*/ /*[5]*/ jglob = j; /*[5]*/ /*[5]*/ /*[5]*/ /*[5]*/ for (k = 0; k < nz; k++) { /*[5]*/ /*[5]*/ double *_imopVarPre254; /*[5]*/ int _imopVarPre255; /*[5]*/ _imopVarPre254 = &u[nx - 1][j][k][0]; /*[5]*/ _imopVarPre255 = nx0 - 1; /*[5]*/ exact(_imopVarPre255, jglob, k, _imopVarPre254); /*[5]*/ } } } /*[6]*/ #pragma omp parallel { /*[6]*/ /*[6]*/ int i; /*[6]*/ int j; /*[6]*/ int k; /*[6]*/ int m; /*[6]*/ int iglob; /*[6]*/ int jglob; /*[6]*/ double xi; /*[6]*/ double eta; /*[6]*/ double zeta; /*[6]*/ double pxi; /*[6]*/ double peta; /*[6]*/ double pzeta; /*[6]*/ double ue_1jk[5]; /*[6]*/ double ue_nx0jk[5]; /*[6]*/ double ue_i1k[5]; /*[6]*/ double ue_iny0k[5]; /*[6]*/ double ue_ij1[5]; /*[6]*/ double ue_ijnz[5]; /*[6]*/ #pragma omp for nowait /*[6]*/ /*[6]*/ /*[6]*/ for (j = 0; j < ny; j++) { /*[6]*/ /*[6]*/ jglob = j; /*[6]*/ /*[6]*/ /*[6]*/ /*[6]*/ for (k = 1; k < nz - 1; k++) { /*[6]*/ /*[6]*/ zeta = ((double) k) / (nz - 1); 
/*[6]*/ int _imopVarPre361; /*[6]*/ _imopVarPre361 = jglob != 0; /*[6]*/ /*[6]*/ if (_imopVarPre361) { /*[6]*/ /*[6]*/ _imopVarPre361 = jglob != ny0 - 1; } /*[6]*/ /*[6]*/ if (_imopVarPre361) { /*[6]*/ /*[6]*/ eta = ((double) jglob) / (ny0 - 1); /*[6]*/ /*[6]*/ /*[6]*/ /*[6]*/ for (i = 0; i < nx; i++) { /*[6]*/ /*[6]*/ iglob = i; /*[6]*/ int _imopVarPre363; /*[6]*/ _imopVarPre363 = iglob != 0; /*[6]*/ /*[6]*/ if (_imopVarPre363) { /*[6]*/ /*[6]*/ _imopVarPre363 = iglob != nx0 - 1; } /*[6]*/ /*[6]*/ if (_imopVarPre363) { /*[6]*/ /*[6]*/ xi = ((double) iglob) / (nx0 - 1); /*[6]*/ exact(0, jglob, k, ue_1jk); /*[6]*/ /*[6]*/ int _imopVarPre365; /*[6]*/ _imopVarPre365 = nx0 - 1; /*[6]*/ exact(_imopVarPre365, jglob, k, ue_nx0jk); /*[6]*/ /*[6]*/ exact(iglob, 0, k, ue_i1k); /*[6]*/ /*[6]*/ int _imopVarPre367; /*[6]*/ _imopVarPre367 = ny0 - 1; /*[6]*/ exact(iglob, _imopVarPre367, k, ue_iny0k); /*[6]*/ /*[6]*/ exact(iglob, jglob, 0, ue_ij1); /*[6]*/ /*[6]*/ int _imopVarPre369; /*[6]*/ _imopVarPre369 = nz - 1; /*[6]*/ exact(iglob, jglob, _imopVarPre369, ue_ijnz); /*[6]*/ /*[6]*/ /*[6]*/ /*[6]*/ /*[6]*/ for (m = 0; m < 5; m++) { /*[6]*/ /*[6]*/ pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; /*[6]*/ peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; /*[6]*/ pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; /*[6]*/ u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } } /*[6, 7]*/ #pragma omp parallel { /*[6, 7]*/ /*[6, 7]*/ int i; /*[6, 7]*/ int j; /*[6, 7]*/ int k; /*[6, 7]*/ int m; /*[6, 7]*/ int iglob; /*[6, 7]*/ int jglob; /*[6, 7]*/ int L1; /*[6, 7]*/ int L2; /*[6, 7]*/ int ist1; /*[6, 7]*/ int iend1; /*[6, 7]*/ int jst1; /*[6, 7]*/ int jend1; /*[6, 7]*/ double dsspm; /*[6, 7]*/ double xi; /*[6, 7]*/ double eta; /*[6, 7]*/ double zeta; /*[6, 7]*/ double q; /*[6, 7]*/ double u21; /*[6, 7]*/ double u31; /*[6, 7]*/ double u41; /*[6, 7]*/ double tmp; /*[6, 7]*/ double u21i; /*[6, 7]*/ double u31i; /*[6, 7]*/ 
double u41i; /*[6, 7]*/ double u51i; /*[6, 7]*/ double u21j; /*[6, 7]*/ double u31j; /*[6, 7]*/ double u41j; /*[6, 7]*/ double u51j; /*[6, 7]*/ double u21k; /*[6, 7]*/ double u31k; /*[6, 7]*/ double u41k; /*[6, 7]*/ double u51k; /*[6, 7]*/ double u21im1; /*[6, 7]*/ double u31im1; /*[6, 7]*/ double u41im1; /*[6, 7]*/ double u51im1; /*[6, 7]*/ double u21jm1; /*[6, 7]*/ double u31jm1; /*[6, 7]*/ double u41jm1; /*[6, 7]*/ double u51jm1; /*[6, 7]*/ double u21km1; /*[6, 7]*/ double u31km1; /*[6, 7]*/ double u41km1; /*[6, 7]*/ double u51km1; /*[6, 7]*/ dsspm = dssp; /*[6, 7]*/ #pragma omp for nowait /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (i = 0; i < nx; i++) { /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (j = 0; j < ny; j++) { /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (k = 0; k < nz; k++) { /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (m = 0; m < 5; m++) { /*[6, 7]*/ /*[6, 7]*/ frct[i][j][k][m] = 0.0; } } } } /*[6, 7]*/ #pragma omp for nowait /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (i = 0; i < nx; i++) { /*[6, 7]*/ /*[6, 7]*/ iglob = i; /*[6, 7]*/ xi = ((double) iglob) / (nx0 - 1); /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (j = 0; j < ny; j++) { /*[6, 7]*/ /*[6, 7]*/ jglob = j; /*[6, 7]*/ eta = ((double) jglob) / (ny0 - 1); /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (k = 0; k < nz; k++) { /*[6, 7]*/ /*[6, 7]*/ zeta = ((double) k) / (nz - 1); /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ /*[6, 7]*/ for (m = 0; m < 5; m++) { /*[6, 7]*/ /*[6, 7]*/ rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*[6, 7]*/ // #pragma omp dummyFlush BARRIER_START /*[6, 7]*/ #pragma omp barrier /*[6, 8]*/ L1 = 0; /*[6, 8]*/ L2 = nx - 1; /*[6, 8]*/ #pragma omp 
for nowait /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ for (i = L1; i <= L2; i++) { /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ for (j = jst; j <= jend; j++) { /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ /*[6, 8]*/ for (k = 1; k < nz - 1; k++) { /*[6, 8]*/ /*[6, 8]*/ flux[i][j][k][0] = rsd[i][j][k][1]; /*[6, 8]*/ u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; /*[6, 8]*/ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /*[6, 8]*/ flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); /*[6, 8]*/ flux[i][j][k][2] = rsd[i][j][k][2] * u21; /*[6, 8]*/ flux[i][j][k][3] = rsd[i][j][k][3] * u21; /*[6, 8]*/ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } /*[6, 8]*/ // #pragma omp dummyFlush BARRIER_START /*[6, 8]*/ #pragma omp barrier /*[6, 9]*/ #pragma omp for nowait /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (j = jst; j <= jend; j++) { /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (k = 1; k <= nz - 2; k++) { /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (i = ist; i <= iend; i++) { /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (m = 0; m < 5; m++) { /*[6, 9]*/ /*[6, 9]*/ frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (i = ist; i <= L2; i++) { /*[6, 9]*/ /*[6, 9]*/ tmp = 1.0 / rsd[i][j][k][0]; /*[6, 9]*/ u21i = tmp * rsd[i][j][k][1]; /*[6, 9]*/ u31i = tmp * rsd[i][j][k][2]; /*[6, 9]*/ u41i = tmp * rsd[i][j][k][3]; /*[6, 9]*/ u51i = tmp * rsd[i][j][k][4]; /*[6, 9]*/ tmp = 1.0 / rsd[i - 1][j][k][0]; /*[6, 9]*/ u21im1 = tmp * rsd[i - 1][j][k][1]; /*[6, 9]*/ u31im1 = tmp * rsd[i - 1][j][k][2]; /*[6, 9]*/ u41im1 = tmp * rsd[i - 1][j][k][3]; /*[6, 9]*/ u51im1 = tmp * rsd[i - 1][j][k][4]; /*[6, 9]*/ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[6, 9]*/ flux[i][j][k][2] = tx3 * (u31i - u31im1); /*[6, 9]*/ 
flux[i][j][k][3] = tx3 * (u41i - u41im1); /*[6, 9]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (i = ist; i <= iend; i++) { /*[6, 9]*/ /*[6, 9]*/ frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); /*[6, 9]*/ frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); /*[6, 9]*/ frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); /*[6, 9]*/ frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); /*[6, 9]*/ frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (m = 0; m < 5; m++) { /*[6, 9]*/ /*[6, 9]*/ frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); /*[6, 9]*/ frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } /*[6, 9]*/ ist1 = 3; /*[6, 9]*/ iend1 = nx - 4; /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (i = ist1; i <= iend1; i++) { /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (m = 0; m < 5; m++) { /*[6, 9]*/ /*[6, 9]*/ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 
4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ /*[6, 9]*/ for (m = 0; m < 5; m++) { /*[6, 9]*/ /*[6, 9]*/ frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); /*[6, 9]*/ frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /*[6, 9]*/ // #pragma omp dummyFlush BARRIER_START /*[6, 9]*/ #pragma omp barrier /*[6, 10]*/ L1 = 0; /*[6, 10]*/ L2 = ny - 1; /*[6, 10]*/ #pragma omp for nowait /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ for (i = ist; i <= iend; i++) { /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ for (j = L1; j <= L2; j++) { /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ /*[6, 10]*/ for (k = 1; k <= nz - 2; k++) { /*[6, 10]*/ /*[6, 10]*/ flux[i][j][k][0] = rsd[i][j][k][2]; /*[6, 10]*/ u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; /*[6, 10]*/ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /*[6, 10]*/ flux[i][j][k][1] = rsd[i][j][k][1] * u31; /*[6, 10]*/ flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); /*[6, 10]*/ flux[i][j][k][3] = rsd[i][j][k][3] * u31; /*[6, 10]*/ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } /*[6, 10]*/ // #pragma omp dummyFlush BARRIER_START /*[6, 10]*/ #pragma omp barrier /*[6, 11]*/ #pragma omp for nowait /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (i = ist; i <= iend; i++) { /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (k = 1; k <= nz - 2; k++) { /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (j = jst; j <= jend; j++) { /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (m = 0; m < 5; m++) { /*[6, 11]*/ /*[6, 11]*/ frct[i][j][k][m] = frct[i][j][k][m] - 
ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (j = jst; j <= L2; j++) { /*[6, 11]*/ /*[6, 11]*/ tmp = 1.0 / rsd[i][j][k][0]; /*[6, 11]*/ u21j = tmp * rsd[i][j][k][1]; /*[6, 11]*/ u31j = tmp * rsd[i][j][k][2]; /*[6, 11]*/ u41j = tmp * rsd[i][j][k][3]; /*[6, 11]*/ u51j = tmp * rsd[i][j][k][4]; /*[6, 11]*/ tmp = 1.0 / rsd[i][j - 1][k][0]; /*[6, 11]*/ u21jm1 = tmp * rsd[i][j - 1][k][1]; /*[6, 11]*/ u31jm1 = tmp * rsd[i][j - 1][k][2]; /*[6, 11]*/ u41jm1 = tmp * rsd[i][j - 1][k][3]; /*[6, 11]*/ u51jm1 = tmp * rsd[i][j - 1][k][4]; /*[6, 11]*/ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /*[6, 11]*/ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[6, 11]*/ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /*[6, 11]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (j = jst; j <= jend; j++) { /*[6, 11]*/ /*[6, 11]*/ frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); /*[6, 11]*/ frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); /*[6, 11]*/ frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); /*[6, 11]*/ frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); /*[6, 11]*/ frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + 
dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (m = 0; m < 5; m++) { /*[6, 11]*/ /*[6, 11]*/ frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); /*[6, 11]*/ frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } /*[6, 11]*/ jst1 = 3; /*[6, 11]*/ jend1 = ny - 4; /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (j = jst1; j <= jend1; j++) { /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (m = 0; m < 5; m++) { /*[6, 11]*/ /*[6, 11]*/ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ /*[6, 11]*/ for (m = 0; m < 5; m++) { /*[6, 11]*/ /*[6, 11]*/ frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); /*[6, 11]*/ frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /*[6, 11]*/ // #pragma omp dummyFlush BARRIER_START /*[6, 11]*/ #pragma omp barrier /*[6, 12]*/ #pragma omp for nowait /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (i = ist; i <= iend; i++) { /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (j = jst; j <= jend; j++) { /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (k = 0; k <= nz - 1; k++) { /*[6, 12]*/ /*[6, 12]*/ flux[i][j][k][0] = rsd[i][j][k][3]; /*[6, 12]*/ u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; /*[6, 12]*/ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /*[6, 12]*/ flux[i][j][k][1] = rsd[i][j][k][1] * u41; /*[6, 12]*/ 
flux[i][j][k][2] = rsd[i][j][k][2] * u41; /*[6, 12]*/ flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); /*[6, 12]*/ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (k = 1; k <= nz - 2; k++) { /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (m = 0; m < 5; m++) { /*[6, 12]*/ /*[6, 12]*/ frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (k = 1; k <= nz - 1; k++) { /*[6, 12]*/ /*[6, 12]*/ tmp = 1.0 / rsd[i][j][k][0]; /*[6, 12]*/ u21k = tmp * rsd[i][j][k][1]; /*[6, 12]*/ u31k = tmp * rsd[i][j][k][2]; /*[6, 12]*/ u41k = tmp * rsd[i][j][k][3]; /*[6, 12]*/ u51k = tmp * rsd[i][j][k][4]; /*[6, 12]*/ tmp = 1.0 / rsd[i][j][k - 1][0]; /*[6, 12]*/ u21km1 = tmp * rsd[i][j][k - 1][1]; /*[6, 12]*/ u31km1 = tmp * rsd[i][j][k - 1][2]; /*[6, 12]*/ u41km1 = tmp * rsd[i][j][k - 1][3]; /*[6, 12]*/ u51km1 = tmp * rsd[i][j][k - 1][4]; /*[6, 12]*/ flux[i][j][k][1] = tz3 * (u21k - u21km1); /*[6, 12]*/ flux[i][j][k][2] = tz3 * (u31k - u31km1); /*[6, 12]*/ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[6, 12]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (k = 1; k <= nz - 2; k++) { /*[6, 12]*/ /*[6, 12]*/ frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); /*[6, 12]*/ frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); /*[6, 12]*/ frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * 
(flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); /*[6, 12]*/ frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); /*[6, 12]*/ frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (m = 0; m < 5; m++) { /*[6, 12]*/ /*[6, 12]*/ frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); /*[6, 12]*/ frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (k = 3; k <= nz - 4; k++) { /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (m = 0; m < 5; m++) { /*[6, 12]*/ /*[6, 12]*/ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ /*[6, 12]*/ for (m = 0; m < 5; m++) { /*[6, 12]*/ /*[6, 12]*/ frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); /*[6, 12]*/ frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } /*[13]*/ #pragma omp parallel { /*[13]*/ /*[13]*/ #pragma omp master { /*[13]*/ /*[13]*/ nthreads = omp_get_num_threads(); /*[13]*/ } } /*[13]*/ int i; /*[13]*/ int j; /*[13]*/ int k; /*[13]*/ int m; /*[13]*/ int istep; /*[13]*/ double tmp; /*[13]*/ double delunm[5]; /*[13]*/ double tv[12][12][5]; /*[13]*/ tmp = 
1.0 / (omega * (2.0 - omega)); /*[13, 14]*/ #pragma omp parallel private(i, j, k, m) { /*[13, 14]*/ /*[13, 14]*/ #pragma omp for nowait /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ for (i = 0; i < 12; i++) { /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ for (j = 0; j < 12; j++) { /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ for (k = 0; k < 5; k++) { /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ for (m = 0; m < 5; m++) { /*[13, 14]*/ /*[13, 14]*/ a[i][j][k][m] = 0.0; /*[13, 14]*/ b[i][j][k][m] = 0.0; /*[13, 14]*/ c[i][j][k][m] = 0.0; /*[13, 14]*/ d[i][j][k][m] = 0.0; } } } } } /*[13, 14, 15]*/ #pragma omp parallel { /*[13, 14, 15]*/ /*[13, 14, 15]*/ int i_imopVarPre84; /*[13, 14, 15]*/ int j_imopVarPre85; /*[13, 14, 15]*/ int k_imopVarPre86; /*[13, 14, 15]*/ int m_imopVarPre87; /*[13, 14, 15]*/ int L1; /*[13, 14, 15]*/ int L2; /*[13, 14, 15]*/ int ist1; /*[13, 14, 15]*/ int iend1; /*[13, 14, 15]*/ int jst1; /*[13, 14, 15]*/ int jend1; /*[13, 14, 15]*/ double q; /*[13, 14, 15]*/ double u21; /*[13, 14, 15]*/ double u31; /*[13, 14, 15]*/ double u41; /*[13, 14, 15]*/ double tmp_imopVarPre88; /*[13, 14, 15]*/ double u21i; /*[13, 14, 15]*/ double u31i; /*[13, 14, 15]*/ double u41i; /*[13, 14, 15]*/ double u51i; /*[13, 14, 15]*/ double u21j; /*[13, 14, 15]*/ double u31j; /*[13, 14, 15]*/ double u41j; /*[13, 14, 15]*/ double u51j; /*[13, 14, 15]*/ double u21k; /*[13, 14, 15]*/ double u31k; /*[13, 14, 15]*/ double u41k; /*[13, 14, 15]*/ double u51k; /*[13, 14, 15]*/ double u21im1; /*[13, 14, 15]*/ double u31im1; /*[13, 14, 15]*/ double u41im1; /*[13, 14, 15]*/ double u51im1; /*[13, 14, 15]*/ double u21jm1; /*[13, 14, 15]*/ double u31jm1; /*[13, 14, 15]*/ double u41jm1; /*[13, 14, 15]*/ double u51jm1; /*[13, 14, 15]*/ double u21km1; /*[13, 14, 15]*/ double u31km1; /*[13, 14, 15]*/ double u41km1; /*[13, 14, 15]*/ double u51km1; /*[13, 14, 15]*/ #pragma omp for nowait /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ for 
(i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ /*[13, 14, 15]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 15]*/ /*[13, 14, 15]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /*[13, 14, 15]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 15]*/ #pragma omp barrier /*[13, 14, 16]*/ L1 = 0; /*[13, 14, 16]*/ L2 = nx - 1; /*[13, 14, 16]*/ #pragma omp for nowait /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ /*[13, 14, 16]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 16]*/ /*[13, 14, 16]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[13, 14, 16]*/ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 16]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 16]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[13, 14, 16]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /*[13, 14, 16]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /*[13, 14, 16]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /*[13, 14, 16]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 16]*/ #pragma omp barrier /*[13, 14, 17]*/ #pragma omp for nowait /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /*[13, 14, 17]*/ L2 = nx - 1; /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ tmp_imopVarPre88 = 1.0 / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 17]*/ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[13, 14, 17]*/ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[13, 14, 17]*/ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[13, 14, 17]*/ u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[13, 14, 17]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 17]*/ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /*[13, 14, 17]*/ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2]; /*[13, 14, 17]*/ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /*[13, 14, 17]*/ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /*[13, 14, 17]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[13, 14, 17]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /*[13, 14, 17]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /*[13, 14, 17]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 
1][j_imopVarPre85][k_imopVarPre86][4]); } /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /*[13, 14, 17]*/ rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /*[13, 14, 17]*/ ist1 = 3; /*[13, 14, 17]*/ iend1 = nx - 4; /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ /*[13, 14, 17]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 17]*/ /*[13, 14, 17]*/ rsd[nx - 
3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /*[13, 14, 17]*/ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /*[13, 14, 17]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 17]*/ #pragma omp barrier /*[13, 14, 18]*/ L1 = 0; /*[13, 14, 18]*/ L2 = ny - 1; /*[13, 14, 18]*/ #pragma omp for nowait /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ /*[13, 14, 18]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 18]*/ /*[13, 14, 18]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[13, 14, 18]*/ u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 18]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 18]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /*[13, 14, 18]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[13, 14, 18]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /*[13, 14, 18]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /*[13, 14, 18]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 18]*/ #pragma omp barrier /*[13, 14, 19]*/ #pragma omp for nowait /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /*[13, 14, 19]*/ L2 = ny - 1; /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ tmp_imopVarPre88 = 1.0 / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 19]*/ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[13, 14, 19]*/ u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[13, 14, 19]*/ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[13, 14, 19]*/ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[13, 14, 19]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /*[13, 14, 19]*/ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /*[13, 14, 19]*/ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2]; /*[13, 14, 19]*/ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /*[13, 14, 19]*/ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /*[13, 14, 19]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /*[13, 14, 19]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[13, 14, 19]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /*[13, 14, 19]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 
1][k_imopVarPre86][4]); } /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /*[13, 14, 19]*/ jst1 = 3; /*[13, 14, 19]*/ jend1 = ny - 4; /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ /*[13, 14, 19]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 19]*/ /*[13, 14, 19]*/ rsd[i_imopVarPre84][ny - 
3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /*[13, 14, 19]*/ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /*[13, 14, 19]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 19]*/ #pragma omp barrier /*[13, 14, 20]*/ #pragma omp for nowait /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[13, 14, 20]*/ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 20]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 20]*/ 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[13, 14, 20]*/ u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[13, 14, 20]*/ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[13, 14, 20]*/ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[13, 14, 20]*/ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[13, 14, 20]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /*[13, 14, 
20]*/ u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /*[13, 14, 20]*/ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /*[13, 14, 20]*/ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /*[13, 14, 20]*/ u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[13, 14, 20]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); 
/*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = 
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ /*[13, 14, 20]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[13, 14, 20]*/ /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /*[13, 14, 20]*/ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * 
u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } } /*[13, 14, 21]*/ #pragma omp parallel { /*[13, 14, 21]*/ /*[13, 14, 21]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[13, 14, 21]*/ double *sum; /*[13, 14, 21]*/ v = rsd; /*[13, 14, 21]*/ sum = rsdnm; /*[13, 14, 21]*/ int i_imopVarPre75; /*[13, 14, 21]*/ int j_imopVarPre76; /*[13, 14, 21]*/ int k_imopVarPre77; /*[13, 14, 21]*/ int m_imopVarPre78; /*[13, 14, 21]*/ double sum0 = 0.0; /*[13, 14, 21]*/ double sum1 = 0.0; /*[13, 14, 21]*/ double sum2 = 0.0; /*[13, 14, 21]*/ double sum3 = 0.0; /*[13, 14, 21]*/ double sum4 = 0.0; /*[13, 14, 21]*/ #pragma omp single nowait { /*[13, 14, 21]*/ /*[13, 14, 21]*/ /*[13, 14, 21]*/ /*[13, 14, 21]*/ /*[13, 14, 21]*/ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /*[13, 14, 21]*/ /*[13, 14, 21]*/ sum[m_imopVarPre78] = 0.0; } } /*[13, 14, 21]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 21]*/ #pragma omp barrier /*[13, 14, 22]*/ #pragma omp for nowait /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ /*[13, 14, 22]*/ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /*[13, 14, 22]*/ /*[13, 14, 22]*/ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /*[13, 14, 22]*/ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /*[13, 14, 22]*/ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /*[13, 14, 22]*/ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * 
v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /*[13, 14, 22]*/ sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /*[13, 14, 22]*/ // #pragma omp dummyFlush CRITICAL_START /*[13, 14, 22]*/ #pragma omp critical { /*[13, 14, 22]*/ /*[13, 14, 22]*/ sum[0] += sum0; /*[13, 14, 22]*/ sum[1] += sum1; /*[13, 14, 22]*/ sum[2] += sum2; /*[13, 14, 22]*/ sum[3] += sum3; /*[13, 14, 22]*/ sum[4] += sum4; } /*[13, 14, 22]*/ // #pragma omp dummyFlush CRITICAL_END /*[13, 14, 22]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 22]*/ #pragma omp barrier /*[13, 14, 23]*/ #pragma omp single nowait { /*[13, 14, 23]*/ /*[13, 14, 23]*/ /*[13, 14, 23]*/ /*[13, 14, 23]*/ /*[13, 14, 23]*/ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /*[13, 14, 23]*/ /*[13, 14, 23]*/ double _imopVarPre154; /*[13, 14, 23]*/ double _imopVarPre155; /*[13, 14, 23]*/ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[13, 14, 23]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[13, 14, 23]*/ /*[13, 14, 23]*/ sum[m_imopVarPre78] = _imopVarPre155; } } } /*[13, 14]*/ timer_clear(1); /*[13, 14]*/ /*[13, 14]*/ timer_start(1); /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ /*[13, 14]*/ for (istep = 1; istep <= itmax; istep++) { /*[13, 14]*/ /*[13, 14]*/ int _imopVarPre372; /*[13, 14]*/ int _imopVarPre370; /*[13, 14]*/ int _imopVarPre371; /*[13, 14]*/ _imopVarPre370 = istep % 20 == 0; /*[13, 14]*/ /*[13, 14]*/ if (!_imopVarPre370) { /*[13, 14]*/ /*[13, 14]*/ _imopVarPre371 = istep == itmax; /*[13, 14]*/ /*[13, 14]*/ if (!_imopVarPre371) { /*[13, 14]*/ /*[13, 14]*/ _imopVarPre371 = istep == 1; } /*[13, 14]*/ _imopVarPre370 = _imopVarPre371; } /*[13, 14]*/ /*[13, 14]*/ if (_imopVarPre370) { /*[13, 14]*/ /*[13, 14]*/ #pragma omp master { /*[13, 14]*/ /*[13, 14]*/ printf(" Time step %4d\n", istep); /*[13, 14]*/ } } /*[13, 14, 24]*/ #pragma omp parallel private(istep, i, j, k, m) { /*[13, 
14, 24]*/ /*[13, 14, 24]*/ int _imopVarPre377; /*[13, 14, 24]*/ int _imopVarPre378; /*[13, 14, 24]*/ int _imopVarPre379; /*[13, 14, 24]*/ int _imopVarPre380; /*[13, 14, 24]*/ #pragma omp for nowait /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ for (i = ist; i <= iend; i++) { /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ for (j = jst; j <= jend; j++) { /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ for (k = 1; k <= nz - 2; k++) { /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ /*[13, 14, 24]*/ for (m = 0; m < 5; m++) { /*[13, 14, 24]*/ /*[13, 14, 24]*/ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /*[13, 14, 24]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 24]*/ #pragma omp barrier /*[13, 14, 25]*/ /*[13, 14, 25]*/ /*[13, 14, 25]*/ /*[13, 14, 25]*/ for (k = 1; k <= nz - 2; k++) { /*[13, 14, 25]*/ /*[13, 14, 25]*/ jacld(k); /*[13, 14, 25]*/ /*[13, 14, 25]*/ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /*[13, 14, 25]*/ } /*[13, 14, 25]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 25]*/ #pragma omp barrier /*[13, 14, 26]*/ /*[13, 14, 26]*/ /*[13, 14, 26]*/ /*[13, 14, 26]*/ for (k = nz - 2; k >= 1; k--) { /*[13, 14, 26]*/ /*[13, 14, 26]*/ jacu(k); /*[13, 14, 26]*/ /*[13, 14, 26]*/ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /*[13, 14, 26]*/ } /*[13, 14, 26]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 26]*/ #pragma omp barrier /*[13, 14, 27]*/ #pragma omp for nowait /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ for (i = ist; i <= iend; i++) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ for (j = jst; j <= jend; j++) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ for (k = 1; k <= nz - 2; k++) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ for (m = 0; m < 
5; m++) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /*[13, 14, 27]*/ /*[13, 14, 27]*/ if (istep % inorm == 0) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[13, 14, 27]*/ double *sum; /*[13, 14, 27]*/ v = rsd; /*[13, 14, 27]*/ sum = delunm; /*[13, 14, 27]*/ int i_imopVarPre89; /*[13, 14, 27]*/ int j_imopVarPre90; /*[13, 14, 27]*/ int k_imopVarPre91; /*[13, 14, 27]*/ int m_imopVarPre92; /*[13, 14, 27]*/ double sum0 = 0.0; /*[13, 14, 27]*/ double sum1 = 0.0; /*[13, 14, 27]*/ double sum2 = 0.0; /*[13, 14, 27]*/ double sum3 = 0.0; /*[13, 14, 27]*/ double sum4 = 0.0; /*[13, 14, 27]*/ #pragma omp single nowait { /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ /*[13, 14, 27]*/ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /*[13, 14, 27]*/ /*[13, 14, 27]*/ sum[m_imopVarPre92] = 0.0; } } /*[13, 14, 27]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 27]*/ #pragma omp barrier /*[13, 14, 28]*/ #pragma omp for nowait /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ /*[13, 14, 28]*/ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /*[13, 14, 28]*/ /*[13, 14, 28]*/ sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /*[13, 14, 28]*/ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /*[13, 14, 28]*/ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /*[13, 14, 28]*/ sum3 = sum3 + 
v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /*[13, 14, 28]*/ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /*[13, 14, 28]*/ // #pragma omp dummyFlush CRITICAL_START /*[13, 14, 28]*/ #pragma omp critical { /*[13, 14, 28]*/ /*[13, 14, 28]*/ sum[0] += sum0; /*[13, 14, 28]*/ sum[1] += sum1; /*[13, 14, 28]*/ sum[2] += sum2; /*[13, 14, 28]*/ sum[3] += sum3; /*[13, 14, 28]*/ sum[4] += sum4; } /*[13, 14, 28]*/ // #pragma omp dummyFlush CRITICAL_END /*[13, 14, 28]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 28]*/ #pragma omp barrier /*[13, 14, 29]*/ #pragma omp single nowait { /*[13, 14, 29]*/ /*[13, 14, 29]*/ /*[13, 14, 29]*/ /*[13, 14, 29]*/ /*[13, 14, 29]*/ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /*[13, 14, 29]*/ /*[13, 14, 29]*/ double _imopVarPre154; /*[13, 14, 29]*/ double _imopVarPre155; /*[13, 14, 29]*/ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[13, 14, 29]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[13, 14, 29]*/ /*[13, 14, 29]*/ sum[m_imopVarPre92] = _imopVarPre155; } } /*[13, 14, 29]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 29]*/ #pragma omp barrier /*[13, 14, 30]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 30]*/ #pragma omp barrier } /*[13, 14, 27, 31]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 27, 31]*/ #pragma omp barrier /*[13, 14, 28, 32]*/ int i_imopVarPre79; /*[13, 14, 28, 32]*/ int j_imopVarPre80; /*[13, 14, 28, 32]*/ int k_imopVarPre81; /*[13, 14, 28, 32]*/ int m_imopVarPre82; /*[13, 14, 28, 32]*/ int L1; /*[13, 14, 28, 32]*/ int L2; /*[13, 14, 28, 32]*/ int ist1; /*[13, 14, 28, 32]*/ int iend1; /*[13, 14, 28, 32]*/ int jst1; /*[13, 14, 28, 32]*/ int jend1; /*[13, 14, 28, 32]*/ double q; /*[13, 14, 28, 32]*/ double u21; /*[13, 14, 28, 32]*/ double u31; /*[13, 14, 28, 32]*/ double u41; /*[13, 14, 28, 32]*/ 
double tmp_imopVarPre83; /*[13, 14, 28, 32]*/ double u21i; /*[13, 14, 28, 32]*/ double u31i; /*[13, 14, 28, 32]*/ double u41i; /*[13, 14, 28, 32]*/ double u51i; /*[13, 14, 28, 32]*/ double u21j; /*[13, 14, 28, 32]*/ double u31j; /*[13, 14, 28, 32]*/ double u41j; /*[13, 14, 28, 32]*/ double u51j; /*[13, 14, 28, 32]*/ double u21k; /*[13, 14, 28, 32]*/ double u31k; /*[13, 14, 28, 32]*/ double u41k; /*[13, 14, 28, 32]*/ double u51k; /*[13, 14, 28, 32]*/ double u21im1; /*[13, 14, 28, 32]*/ double u31im1; /*[13, 14, 28, 32]*/ double u41im1; /*[13, 14, 28, 32]*/ double u51im1; /*[13, 14, 28, 32]*/ double u21jm1; /*[13, 14, 28, 32]*/ double u31jm1; /*[13, 14, 28, 32]*/ double u41jm1; /*[13, 14, 28, 32]*/ double u51jm1; /*[13, 14, 28, 32]*/ double u21km1; /*[13, 14, 28, 32]*/ double u31km1; /*[13, 14, 28, 32]*/ double u41km1; /*[13, 14, 28, 32]*/ double u51km1; /*[13, 14, 28, 32]*/ #pragma omp for nowait /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 28, 32]*/ /*[13, 14, 28, 32]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /*[13, 14, 28, 32]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 28, 32]*/ #pragma omp barrier /*[13, 14, 29, 33]*/ L1 = 0; /*[13, 14, 29, 33]*/ L2 = nx - 1; /*[13, 14, 29, 33]*/ #pragma omp for nowait /*[13, 14, 29, 33]*/ /*[13, 14, 
29, 33]*/ /*[13, 14, 29, 33]*/ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 29, 33]*/ /*[13, 14, 29, 33]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[13, 14, 29, 33]*/ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 29, 33]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 29, 33]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[13, 14, 29, 33]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /*[13, 14, 29, 33]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /*[13, 14, 29, 33]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /*[13, 14, 29, 33]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 29, 33]*/ #pragma omp barrier /*[13, 14, 30, 34]*/ #pragma omp for nowait /*[13, 14, 30, 34]*/ /*[13, 14, 
30, 34]*/ /*[13, 14, 30, 34]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /*[13, 14, 30, 34]*/ L2 = nx - 1; /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 30, 34]*/ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[13, 14, 30, 34]*/ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[13, 14, 30, 34]*/ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[13, 14, 30, 34]*/ u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[13, 14, 30, 34]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 30, 34]*/ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1]; /*[13, 14, 30, 34]*/ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; 
/*[13, 14, 30, 34]*/ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3]; /*[13, 14, 30, 34]*/ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /*[13, 14, 30, 34]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[13, 14, 30, 34]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /*[13, 14, 30, 34]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /*[13, 14, 30, 34]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]); /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 
1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /*[13, 14, 30, 34]*/ rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * 
u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /*[13, 14, 30, 34]*/ ist1 = 3; /*[13, 14, 30, 34]*/ iend1 = nx - 4; /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 30, 34]*/ /*[13, 14, 30, 34]*/ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /*[13, 14, 30, 34]*/ rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 
* u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /*[13, 14, 30, 34]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 30, 34]*/ #pragma omp barrier /*[13, 14, 31, 35]*/ L1 = 0; /*[13, 14, 31, 35]*/ L2 = ny - 1; /*[13, 14, 31, 35]*/ #pragma omp for nowait /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 31, 35]*/ /*[13, 14, 31, 35]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[13, 14, 31, 35]*/ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 31, 35]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 31, 35]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /*[13, 14, 31, 35]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[13, 14, 31, 35]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /*[13, 14, 31, 35]*/ 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /*[13, 14, 31, 35]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 31, 35]*/ #pragma omp barrier /*[13, 14, 32, 36]*/ #pragma omp for nowait /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /*[13, 14, 32, 36]*/ L2 = ny - 1; /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 32, 36]*/ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[13, 14, 32, 36]*/ u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[13, 14, 32, 36]*/ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[13, 14, 32, 36]*/ u51j = tmp_imopVarPre83 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[13, 14, 32, 36]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /*[13, 14, 32, 36]*/ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /*[13, 14, 32, 36]*/ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /*[13, 14, 32, 36]*/ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /*[13, 14, 32, 36]*/ u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /*[13, 14, 32, 36]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /*[13, 14, 32, 36]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[13, 14, 32, 36]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /*[13, 14, 32, 36]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * 
(+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /*[13, 14, 32, 36]*/ jst1 = 3; /*[13, 14, 32, 36]*/ jend1 = ny - 4; /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 32, 36]*/ /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * 
u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /*[13, 14, 32, 36]*/ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /*[13, 14, 32, 36]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 32, 36]*/ #pragma omp barrier /*[13, 14, 33, 37]*/ #pragma omp for nowait /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[13, 14, 33, 37]*/ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 33, 37]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /*[13, 14, 33, 37]*/ 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[13, 14, 33, 37]*/ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[13, 14, 33, 37]*/ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[13, 14, 33, 37]*/ u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[13, 14, 33, 37]*/ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[13, 14, 33, 37]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /*[13, 14, 33, 37]*/ u21km1 = tmp_imopVarPre83 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /*[13, 14, 33, 37]*/ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /*[13, 14, 33, 37]*/ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /*[13, 14, 33, 37]*/ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[13, 14, 33, 37]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]); } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /*[13, 14, 33, 
37]*/ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[13, 14, 33, 37]*/ /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /*[13, 14, 33, 37]*/ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * 
(u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /*[13, 14, 33, 37]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 33, 37]*/ #pragma omp barrier /*[13, 14, 34, 38]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 34, 38]*/ #pragma omp barrier /*[13, 14, 35, 39]*/ #pragma omp master { /*[13, 14, 35, 39]*/ /*[13, 14, 35, 39]*/ _imopVarPre372 = (istep % inorm == 0); /*[13, 14, 35, 39]*/ /*[13, 14, 35, 39]*/ if (!_imopVarPre372) { /*[13, 14, 35, 39]*/ /*[13, 14, 35, 39]*/ _imopVarPre372 = (istep == itmax); } } /*[13, 14, 35, 39]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 35, 39]*/ #pragma omp barrier /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ if (_imopVarPre372) { /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[13, 14, 36, 40]*/ double *sum; /*[13, 14, 36, 40]*/ v = rsd; /*[13, 14, 36, 40]*/ sum = rsdnm; /*[13, 14, 36, 40]*/ int i_imopVarPre93; /*[13, 14, 36, 40]*/ int j_imopVarPre94; /*[13, 14, 36, 40]*/ int k_imopVarPre95; /*[13, 14, 36, 40]*/ int m_imopVarPre96; /*[13, 14, 36, 40]*/ double sum0 = 0.0; /*[13, 14, 36, 40]*/ double sum1 = 0.0; /*[13, 14, 36, 40]*/ double sum2 = 0.0; /*[13, 14, 36, 40]*/ double sum3 = 0.0; /*[13, 14, 36, 40]*/ double sum4 = 0.0; /*[13, 14, 36, 40]*/ #pragma omp single nowait { /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /*[13, 14, 36, 40]*/ /*[13, 14, 36, 40]*/ sum[m_imopVarPre96] = 0.0; } } /*[13, 14, 36, 40]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 36, 40]*/ #pragma omp barrier /*[13, 14, 37]*/ #pragma omp for nowait /*[13, 14, 37]*/ /*[13, 14, 37]*/ /*[13, 14, 37]*/ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ /*[13, 14, 37]*/ 
/*[13, 14, 37]*/ /*[13, 14, 37]*/ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ /*[13, 14, 37]*/ /*[13, 14, 37]*/ /*[13, 14, 37]*/ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /*[13, 14, 37]*/ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /*[13, 14, 37]*/ sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /*[13, 14, 37]*/ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /*[13, 14, 37]*/ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /*[13, 14, 37]*/ // #pragma omp dummyFlush CRITICAL_START /*[13, 14, 37]*/ #pragma omp critical { /*[13, 14, 37]*/ /*[13, 14, 37]*/ sum[0] += sum0; /*[13, 14, 37]*/ sum[1] += sum1; /*[13, 14, 37]*/ sum[2] += sum2; /*[13, 14, 37]*/ sum[3] += sum3; /*[13, 14, 37]*/ sum[4] += sum4; } /*[13, 14, 37]*/ // #pragma omp dummyFlush CRITICAL_END /*[13, 14, 37]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 37]*/ #pragma omp barrier /*[13, 14, 38]*/ #pragma omp single nowait { /*[13, 14, 38]*/ /*[13, 14, 38]*/ /*[13, 14, 38]*/ /*[13, 14, 38]*/ /*[13, 14, 38]*/ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /*[13, 14, 38]*/ /*[13, 14, 38]*/ double _imopVarPre154; /*[13, 14, 38]*/ double _imopVarPre155; /*[13, 14, 38]*/ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[13, 14, 38]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[13, 14, 38]*/ /*[13, 14, 38]*/ sum[m_imopVarPre96] = _imopVarPre155; } } /*[13, 14, 38]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 
14, 38]*/ #pragma omp barrier /*[13, 14, 39]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 39]*/ #pragma omp barrier } /*[13, 14, 36, 40]*/ // #pragma omp dummyFlush BARRIER_START /*[13, 14, 36, 40]*/ #pragma omp barrier /*[13, 14, 37]*/ #pragma omp master { /*[13, 14, 37]*/ /*[13, 14, 37]*/ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /*[13, 14, 37]*/ /*[13, 14, 37]*/ if (_imopVarPre377) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ _imopVarPre378 = (rsdnm[1] < tolrsd[1]); /*[13, 14, 37]*/ /*[13, 14, 37]*/ if (_imopVarPre378) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /*[13, 14, 37]*/ /*[13, 14, 37]*/ if (_imopVarPre379) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ _imopVarPre380 = (rsdnm[3] < tolrsd[3]); /*[13, 14, 37]*/ /*[13, 14, 37]*/ if (_imopVarPre380) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ _imopVarPre380 = (rsdnm[4] < tolrsd[4]); } /*[13, 14, 37]*/ _imopVarPre379 = _imopVarPre380; } /*[13, 14, 37]*/ _imopVarPre378 = _imopVarPre379; } /*[13, 14, 37]*/ _imopVarPre377 = _imopVarPre378; } /*[13, 14, 37]*/ /*[13, 14, 37]*/ if (_imopVarPre377) { /*[13, 14, 37]*/ /*[13, 14, 37]*/ exit(1); /*[13, 14, 37]*/ } } } } /*[13, 14]*/ timer_stop(1); /*[13, 14]*/ /*[13, 14]*/ maxtime = timer_read(1); /*[13, 14]*/ /*[]*/ error(); /*[]*/ /*[]*/ pintgr(); /*[]*/ /*[]*/ int *_imopVarPre144; /*[]*/ char *_imopVarPre145; /*[]*/ _imopVarPre144 = &verified; /*[]*/ _imopVarPre145 = &class; /*[]*/ verify(rsdnm, errnm, frc, _imopVarPre145, _imopVarPre144); /*[]*/ /*[]*/ mflops = (double) itmax * (1984.77 * (double) nx0 * (double) ny0 * (double) nz0 - 10923.3 * (((double) (nx0 + ny0 + nz0) / 3.0) * ((double) (nx0 + ny0 + nz0) / 3.0)) + 27770.9 * (double) (nx0 + ny0 + nz0) / 3.0 - 144010.0) / (maxtime * 1000000.0); /*[]*/ c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)"); /*[]*/ } /*[13, 14, 25, 41]*/ 
/*
 * blts: block lower-triangular solve for one k-plane of the SSOR forward
 * sweep (NPB LU).  First subtracts the k-1 (sub-diagonal z) coupling from v,
 * then, row by row in i, subtracts the j-1 and i-1 couplings and solves the
 * local 5x5 diagonal block d[i][j] by in-place Gaussian elimination (no
 * pivoting) plus back substitution, overwriting v[i][j][k][0..4].
 *
 * Concurrency: both loops are "omp for nowait" and therefore must run inside
 * an enclosing "omp parallel" region.  The second loop is software-pipelined
 * across threads: row i waits (spin + flush) until row i-1 of this sweep is
 * published through the shared array `flag` (declared elsewhere in this
 * file), and publishes its own completion the same way.
 * NOTE(review): the pipeline's correctness depends on schedule(static), on
 * the exact placement of the "omp flush(flag)" directives, and on the
 * statement order below -- do not reorder.
 */
static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0)
{
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5]; /* working copy of the 5x5 diagonal block; destroyed by the elimination */
    /* Phase 1: subtract the lower-z (k-1) coupling.  Rows are independent here. */
#pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);
            }
        }
    }
    /* Phase 2: pipelined sweep over i; row i consumes row i-1 of this same sweep. */
#pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        /* Spin until the thread owning row i-1 has published it. */
        if (i != ist) {
            while (flag[i - 1] == 0) {
#pragma omp flush(flag)
                ;
            }
        }
        /* Spin until the downstream consumer has drained our previous signal. */
        if (i != iend) {
            while (flag[i] == 1) {
#pragma omp flush(flag)
                ;
            }
        }
        for (j = jst; j <= jend; j++) {
            /* Subtract the lower-y (j-1) and lower-x (i-1) couplings. */
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]);
            }
            /* Take a local copy of the 5x5 diagonal block. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination, column 0 (pivot tmat[0][0], no pivoting). */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp;
            /* Column 1. */
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp;
            /* Column 2. */
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp;
            /* Column 3. */
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp;
            /* Back substitution. */
            v[i][j][k][4] = v[i][j][k][4] / tmat[4][4];
            v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4];
            v[i][j][k][3] = v[i][j][k][3] / tmat[3][3];
            v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4];
            v[i][j][k][2] = v[i][j][k][2] / tmat[2][2];
            v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4];
            v[i][j][k][1] = v[i][j][k][1] / tmat[1][1];
            v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4];
            v[i][j][k][0] = v[i][j][k][0] / tmat[0][0];
        }
        /* Publish completion of row i to the pipeline. */
        if (i != ist) {
            flag[i - 1] = 0;
        }
        if (i != iend) {
            flag[i] = 1;
        }
#pragma omp flush(flag)
    }
}
/*
 * buts: block upper-triangular solve for one k-plane of the SSOR backward
 * sweep (NPB LU).  Mirror image of blts: accumulates the k+1, j+1 and i+1
 * couplings into the scratch array tv, solves the 5x5 diagonal block
 * d[i][j] by Gaussian elimination (no pivoting) plus back substitution on
 * tv, and subtracts the result from v[i][j][k][0..4].
 *
 * Concurrency: both loops are "omp for nowait" inside an enclosing
 * "omp parallel" region; the second loop is pipelined DOWNWARD in i
 * (i = iend..ist), synchronized through the shared array `flag` (declared
 * elsewhere in this file) with explicit "omp flush".
 * NOTE(review): as in blts, correctness depends on schedule(static) and on
 * the exact flush/statement order -- do not reorder.
 */
static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5], double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0)
{
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5]; /* working copy of the 5x5 diagonal block; destroyed by the elimination */
    /* Phase 1: accumulate the upper-z (k+1) coupling into tv.  Rows independent. */
#pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);
            }
        }
    }
    /* Phase 2: pipelined sweep over decreasing i; row i consumes row i+1. */
#pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        /* Spin until the thread owning row i+1 has published it. */
        if (i != iend) {
            while (flag[i + 1] == 0) {
#pragma omp flush(flag)
                ;
            }
        }
        /* Spin until the downstream consumer has drained our previous signal. */
        if (i != ist) {
            while (flag[i] == 1) {
#pragma omp flush(flag)
                ;
            }
        }
        for (j = jend; j >= jst; j--) {
            /* Add the upper-y (j+1) and upper-x (i+1) couplings. */
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]);
            }
            /* Take a local copy of the 5x5 diagonal block. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination, column 0 (pivot tmat[0][0], no pivoting). */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp;
            /* Column 1. */
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp;
            /* Column 2. */
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;
            /* Column 3. */
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;
            /* Back substitution on tv. */
            tv[i][j][4] = tv[i][j][4] / tmat[4][4];
            tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
            tv[i][j][3] = tv[i][j][3] / tmat[3][3];
            tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4];
            tv[i][j][2] = tv[i][j][2] / tmat[2][2];
            tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4];
            tv[i][j][1] = tv[i][j][1] / tmat[1][1];
            tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4];
            tv[i][j][0] = tv[i][j][0] / tmat[0][0];
            /* Apply the correction to v. */
            v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
            v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
            v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
            v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
            v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
        }
        /* Publish completion of row i to the pipeline. */
        if (i != iend) {
            flag[i + 1] = 0;
        }
        if (i != ist) {
            flag[i] = 1;
        }
#pragma omp flush(flag)
    }
}
/*
 * domain: set the local subdomain extents from the global problem size
 * (nx0/ny0/nz0, file globals) and validate them against the hard limits
 * (minimum 4, maximum 12 per dimension); exits on violation.
 * Also sets the interior loop bounds ist/iend/jst/jend used by the solvers.
 */
static void domain()
{
    nx = nx0;
    ny = ny0;
    nz = nz0;
    int _imopVarPre146;
    int _imopVarPre147;
    /* _imopVarPre146 <- (nx < 4 || ny < 4 || nz < 4), short-circuit expanded. */
    _imopVarPre146 = nx < 4;
    if (!_imopVarPre146) {
        _imopVarPre147 = ny < 4;
        if (!_imopVarPre147) {
            _imopVarPre147 = nz < 4;
        }
        _imopVarPre146 = _imopVarPre147;
    }
    if (_imopVarPre146) {
        printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
        exit(1);
    }
    int _imopVarPre148;
    int _imopVarPre149;
    /* _imopVarPre148 <- (nx > 12 || ny > 12 || nz > 12), short-circuit expanded. */
    _imopVarPre148 = nx > 12;
    if (!_imopVarPre148) {
        _imopVarPre149 = ny > 12;
        if (!_imopVarPre149) {
            _imopVarPre149 = nz > 12;
        }
        _imopVarPre148 = _imopVarPre149;
    }
    if (_imopVarPre148) {
        printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
        exit(1);
    }
    /* Interior index ranges (exclude the two boundary layers). */
    ist = 1;
    iend = nx - 2;
    jst = 1;
    jend = ny - 2;
}
/*
 * erhs: compute the exact right-hand side `frct` for the LU benchmark.
 * Evaluates the polynomial exact solution (coefficients ce, file global)
 * into rsd, then adds, direction by direction (xi, eta, zeta), the flux
 * differences and fourth-order artificial dissipation.  Each directional
 * phase is an "omp for nowait" worksharing loop separated by explicit
 * barriers because the flux scratch array is reused between phases.
 */
static void erhs()
{
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        int iglob;
        int jglob;
        int L1;
        int L2;
        int ist1;
        int iend1;
        int jst1;
        int jend1;
        double dsspm;
        double xi;
        double eta;
        double zeta;
        double q;
        double u21;
        double u31;
        double u41;
        double tmp;
        double u21i;
        double u31i;
        double u41i;
        double u51i;
        double u21j;
        double u31j;
        double u41j;
        double u51j;
        double u21k;
        double u31k;
        double u41k;
        double u51k;
        double u21im1;
        double u31im1;
        double u41im1;
        double u51im1;
        double u21jm1;
        double u31jm1;
        double u41jm1;
        double u51jm1;
        double u21km1;
        double u31km1;
        double u41km1;
        double u51km1;
        dsspm = dssp; /* dissipation coefficient, file global */
        /* Zero the right-hand side. */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                for (k = 0; k < nz; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = 0.0;
                    }
                }
            }
        }
        /* Evaluate the exact-solution polynomial into rsd. */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            xi = ((double) iglob) / (nx0 - 1);
            for (j = 0; j < ny; j++) {
                jglob = j;
                eta = ((double) jglob) / (ny0 - 1);
                for (k = 0; k < nz; k++) {
                    zeta = ((double) k) / (nz - 1);
                    for (m = 0; m < 5; m++) {
                        rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
                    }
                }
            }
        }
#pragma omp barrier
        /* xi-direction convective fluxes. */
        L1 = 0;
        L2 = nx - 1;
#pragma omp for nowait
        for (i = L1; i <= L2; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 1; k < nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][1];
                    u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][2] = rsd[i][j][k][2] * u21;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u21;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;
                }
            }
        }
#pragma omp barrier
        /* xi-direction flux differences, viscous terms and dissipation. */
#pragma omp for nowait
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                for (i = ist; i <= iend; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
                    }
                }
                for (i = ist; i <= L2; i++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21i = tmp * rsd[i][j][k][1];
                    u31i = tmp * rsd[i][j][k][2];
                    u41i = tmp * rsd[i][j][k][3];
                    u51i = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i - 1][j][k][0];
                    u21im1 = tmp * rsd[i - 1][j][k][1];
                    u31im1 = tmp * rsd[i - 1][j][k][2];
                    u41im1 = tmp * rsd[i - 1][j][k][3];
                    u51im1 = tmp * rsd[i - 1][j][k][4];
                    flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
                    flux[i][j][k][2] = tx3 * (u31i - u31im1);
                    flux[i][j][k][3] = tx3 * (u41i - u41im1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
                }
                for (i = ist; i <= iend; i++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);
                }
                /* Fourth-order dissipation: one-sided stencils near i boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);
                    frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);
                }
                ist1 = 3;
                iend1 = nx - 4;
                for (i = ist1; i <= iend1; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);
                    frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);
                }
            }
        }
#pragma omp barrier
        /* eta-direction convective fluxes. */
        L1 = 0;
        L2 = ny - 1;
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = L1; j <= L2; j++) {
                for (k = 1; k <= nz - 2; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][2];
                    u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u31;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][3] = rsd[i][j][k][3] * u31;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;
                }
            }
        }
#pragma omp barrier
        /* eta-direction flux differences, viscous terms and dissipation. */
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (k = 1; k <= nz - 2; k++) {
                for (j = jst; j <= jend; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
                    }
                }
                for (j = jst; j <= L2; j++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21j = tmp * rsd[i][j][k][1];
                    u31j = tmp * rsd[i][j][k][2];
                    u41j = tmp * rsd[i][j][k][3];
                    u51j = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j - 1][k][0];
                    u21jm1 = tmp * rsd[i][j - 1][k][1];
                    u31jm1 = tmp * rsd[i][j - 1][k][2];
                    u41jm1 = tmp * rsd[i][j - 1][k][3];
                    u51jm1 = tmp * rsd[i][j - 1][k][4];
                    flux[i][j][k][1] = ty3 * (u21j - u21jm1);
                    flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
                    flux[i][j][k][3] = ty3 * (u41j - u41jm1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
                }
                for (j = jst; j <= jend; j++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);
                }
                /* Fourth-order dissipation: one-sided stencils near j boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);
                    frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);
                }
                jst1 = 3;
                jend1 = ny - 4;
                for (j = jst1; j <= jend1; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);
                    frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);
                }
            }
        }
#pragma omp barrier
        /* zeta-direction fluxes, differences, viscous terms and dissipation. */
#pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 0; k <= nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][3];
                    u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u41;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u41;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;
                }
                for (k = 1; k <= nz - 2; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
                    }
                }
                for (k = 1; k <= nz - 1; k++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21k = tmp * rsd[i][j][k][1];
                    u31k = tmp * rsd[i][j][k][2];
                    u41k = tmp * rsd[i][j][k][3];
                    u51k = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j][k - 1][0];
                    u21km1 = tmp * rsd[i][j][k - 1][1];
                    u31km1 = tmp * rsd[i][j][k - 1][2];
                    u41km1 = tmp * rsd[i][j][k - 1][3];
                    u51km1 = tmp * rsd[i][j][k - 1][4];
                    flux[i][j][k][1] = tz3 * (u21k - u21km1);
                    flux[i][j][k][2] = tz3 * (u31k - u31km1);
                    flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
                }
                for (k = 1; k <= nz - 2; k++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);
                }
                /* Fourth-order dissipation: one-sided stencils near k boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);
                    frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
                }
                for (k = 3; k <= nz - 4; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
                    frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
                }
            }
        }
    }
}
/*
 * error: compute the solution-error norms errnm[0..4] against the exact
 * solution (via exact()).  NOTE: this function continues beyond this chunk
 * of the file; only its opening is visible here.
 */
static void error()
{
    int i;
    int j;
    int k;
    int m;
    int iglob;
    int jglob;
    double tmp;
    double u000ijk[5];
    for (m = 0; m < 5; m++) {
        errnm[m] = 0.0;
    }
    for (i = ist; i <= iend; i++) {
        iglob = i;
        for (j = jst; j <= jend; j++) {
            jglob = j;
            for (k = 1; k <= nz - 2; k++) {
                exact(iglob, jglob, k, u000ijk);
/*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ tmp = (u000ijk[m] - u[i][j][k][m]); /*[]*/ errnm[m] = errnm[m] + tmp * tmp; } } } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ double _imopVarPre151; /*[]*/ double _imopVarPre152; /*[]*/ _imopVarPre151 = errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[]*/ _imopVarPre152 = sqrt(_imopVarPre151); /*[]*/ /*[]*/ errnm[m] = _imopVarPre152; } } /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ static void exact(int i, int j , int k , double u000ijk[5]) { /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ int m; /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ double xi; /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ double eta; /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ double zeta; /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ xi = ((double) i) / (nx0 - 1); /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ eta = ((double) j) / (ny0 - 1); /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ zeta = ((double) k) / (nz - 1); /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ for (m = 0; m < 5; m++) { /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ /*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/ u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } /*[13, 14, 25, 41]*/ /*[13, 14, 25, 41]*/ static void jacld(int k) { /*[13, 14, 25, 41]*/ /*[13, 14, 
25, 41]*/
  int i;
  int j;
  double r43;     /* 4/3 — appears in the normal-direction viscous terms */
  double c1345;   /* C1*C3*C4*C5 constant product */
  double c34;     /* C3*C4 constant product */
  double tmp1;    /* 1/rho at the stencil point */
  double tmp2;    /* tmp1^2 */
  double tmp3;    /* tmp1^3 */
  r43 = (4.0 / 3.0);
  c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
  c34 = 1.00e-01 * 1.00e+00;
#pragma omp for nowait schedule(static)
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
      /* ---- diagonal block d, from the local state u[i][j][k] ---- */
      tmp1 = 1.0 / u[i][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
      d[i][j][0][1] = 0.0;
      d[i][j][0][2] = 0.0;
      d[i][j][0][3] = 0.0;
      d[i][j][0][4] = 0.0;
      d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
      d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
      d[i][j][1][2] = 0.0;
      d[i][j][1][3] = 0.0;
      d[i][j][1][4] = 0.0;
      d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
      d[i][j][2][1] = 0.0;
      d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
      d[i][j][2][3] = 0.0;
      d[i][j][2][4] = 0.0;
      d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
      d[i][j][3][1] = 0.0;
      d[i][j][3][2] = 0.0;
      d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
      d[i][j][3][4] = 0.0;
      d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
      d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
      d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
      d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
      d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
      /* ---- block a, from the k-1 neighbour u[i][j][k-1] ---- */
      tmp1 = 1.0 / u[i][j][k - 1][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      a[i][j][0][0] = -dt * tz1 * dz1;
      a[i][j][0][1] = 0.0;
      a[i][j][0][2] = 0.0;
      a[i][j][0][3] = -dt * tz2;
      a[i][j][0][4] = 0.0;
      a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);
      a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
      a[i][j][1][2] = 0.0;
      a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);
      a[i][j][1][4] = 0.0;
      a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);
      a[i][j][2][1] = 0.0;
      a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
      a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);
      a[i][j][2][4] = 0.0;
      a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);
      a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1));
      a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1));
      a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
      a[i][j][3][4] = -dt * tz2 * 0.40e+00;
      a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);
      a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];
      a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];
      a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];
      a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
      /* ---- block b, from the j-1 neighbour u[i][j-1][k] ---- */
      tmp1 = 1.0 / u[i][j - 1][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      b[i][j][0][0] = -dt * ty1 * dy1;
      b[i][j][0][1] = 0.0;
      b[i][j][0][2] = -dt * ty2;
      b[i][j][0][3] = 0.0;
      b[i][j][0][4] = 0.0;
      b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);
      b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
      b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);
      b[i][j][1][3] = 0.0;
      b[i][j][1][4] = 0.0;
      b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);
      b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1));
      b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
      b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1));
      b[i][j][2][4] = -dt * ty2 * 0.40e+00;
      b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);
      b[i][j][3][1] = 0.0;
      b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);
      b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
      b[i][j][3][4] = 0.0;
      b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j - 1][k][1]) * (u[i][j - 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j - 1][k][2]) * (u[i][j - 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j - 1][k][3]) * (u[i][j - 1][k][3]))) - c1345 * tmp2 * u[i][j - 1][k][4]);
      b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];
      b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];
      b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];
      b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
      /* ---- block c, from the i-1 neighbour u[i-1][j][k] ---- */
      tmp1 = 1.0 / u[i - 1][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      c[i][j][0][0] = -dt * tx1 * dx1;
      c[i][j][0][1] = -dt * tx2;
      c[i][j][0][2] = 0.0;
      c[i][j][0][3] = 0.0;
      c[i][j][0][4] = 0.0;
      c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);
      c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
      c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1));
      c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1));
      c[i][j][1][4] = -dt * tx2 * 0.40e+00;
      c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);
      c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);
      c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
      c[i][j][2][3] = 0.0;
      c[i][j][2][4] = 0.0;
      c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);
      c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);
      c[i][j][3][2] = 0.0;
      c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
      c[i][j][3][4] = 0.0;
      c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i - 1][j][k][1]) * (u[i - 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][2]) * (u[i - 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][3]) * (u[i - 1][j][k][3]))) - c1345 * tmp2 * u[i - 1][j][k][4]);
      c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];
      c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];
      c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
      c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
    }
  }
}
/*
 * jacu: mirror of jacld for the upper-triangular sweep of plane k.
 * Fills d (diagonal, from u[i][j][k]) and the off-diagonal blocks
 * a (from u[i+1][j][k]), b (from u[i][j+1][k]) and c (from u[i][j][k+1]);
 * the (i,j) loops run in DESCENDING order.  Runs inside an enclosing
 * OpenMP parallel region with "omp for nowait" over i.
 */
static void jacu(int k) {
  int i;
  int j;
  double r43;     /* 4/3 */
  double c1345;   /* C1*C3*C4*C5 constant product */
  double c34;     /* C3*C4 constant product */
  double tmp1;    /* 1/rho at the stencil point */
  double tmp2;    /* tmp1^2 */
  double tmp3;    /* tmp1^3 */
  /*[13, 14,
26, 42]*/
  r43 = (4.0 / 3.0);
  c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
  c34 = 1.00e-01 * 1.00e+00;
#pragma omp for nowait schedule(static)
  for (i = iend; i >= ist; i--) {
    for (j = jend; j >= jst; j--) {
      /* ---- diagonal block d, from the local state u[i][j][k] ---- */
      tmp1 = 1.0 / u[i][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
      d[i][j][0][1] = 0.0;
      d[i][j][0][2] = 0.0;
      d[i][j][0][3] = 0.0;
      d[i][j][0][4] = 0.0;
      d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
      d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
      d[i][j][1][2] = 0.0;
      d[i][j][1][3] = 0.0;
      d[i][j][1][4] = 0.0;
      d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
      d[i][j][2][1] = 0.0;
      d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
      d[i][j][2][3] = 0.0;
      d[i][j][2][4] = 0.0;
      d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
      d[i][j][3][1] = 0.0;
      d[i][j][3][2] = 0.0;
      d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
      d[i][j][3][4] = 0.0;
      d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
      d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
      d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
      d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
      d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
      /* ---- block a, from the i+1 neighbour u[i+1][j][k] ---- */
      tmp1 = 1.0 / u[i + 1][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      a[i][j][0][0] = -dt * tx1 * dx1;
      a[i][j][0][1] = dt * tx2;
      a[i][j][0][2] = 0.0;
      a[i][j][0][3] = 0.0;
      a[i][j][0][4] = 0.0;
      a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]);
      a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
      a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1));
      a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1));
      a[i][j][1][4] = dt * tx2 * 0.40e+00;
      a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]);
      a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1);
      a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
      a[i][j][2][3] = 0.0;
      a[i][j][2][4] = 0.0;
      a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]);
      a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1);
      a[i][j][3][2] = 0.0;
      a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
      a[i][j][3][4] = 0.0;
      a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i + 1][j][k][1]) * (u[i + 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][2]) * (u[i + 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][3]) * (u[i + 1][j][k][3]))) - c1345 * tmp2 * u[i + 1][j][k][4]);
      a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1];
      a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2];
      a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3];
      a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
      /* ---- block b, from the j+1 neighbour u[i][j+1][k] ---- */
      tmp1 = 1.0 / u[i][j + 1][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      b[i][j][0][0] = -dt * ty1 * dy1;
      b[i][j][0][1] = 0.0;
      b[i][j][0][2] = dt * ty2;
      b[i][j][0][3] = 0.0;
      b[i][j][0][4] = 0.0;
      b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]);
      b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
      b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1);
      b[i][j][1][3] = 0.0;
      b[i][j][1][4] = 0.0;
      b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]);
      b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1));
      b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
      b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1));
      b[i][j][2][4] = dt * ty2 * 0.40e+00;
      b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]);
      b[i][j][3][1] = 0.0;
      b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1);
      b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
      b[i][j][3][4] = 0.0;
      b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j + 1][k][1]) * (u[i][j + 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j + 1][k][2]) * (u[i][j + 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j + 1][k][3]) * (u[i][j + 1][k][3]))) - c1345 * tmp2 * u[i][j + 1][k][4]);
      b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1];
      b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2];
      b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3];
      b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
      /* ---- block c, from the k+1 neighbour u[i][j][k+1] ---- */
      tmp1 = 1.0 / u[i][j][k + 1][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      c[i][j][0][0] = -dt * tz1 * dz1;
      c[i][j][0][1] = 0.0;
      c[i][j][0][2] = 0.0;
      c[i][j][0][3] = dt * tz2;
      c[i][j][0][4] = 0.0;
      c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]);
      c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
      c[i][j][1][2] = 0.0;
      c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1);
      c[i][j][1][4] = 0.0;
      c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]);
      c[i][j][2][1] = 0.0;
      c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
      c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1);
      c[i][j][2][4] = 0.0;
      c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]);
      c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1));
      c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1));
      c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
      c[i][j][3][4] = dt * tz2 * 0.40e+00;
      c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k + 1][1]) * (u[i][j][k + 1][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k + 1][2]) * (u[i][j][k + 1][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k + 1][3]) * (u[i][j][k + 1][3]))) - c1345 * tmp2 * u[i][j][k + 1][4]);
      c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];
      c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];
      c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];
      c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
    }
  }
}
/*
 * l2norm: RMS norm of the 5 components of v over the interior points
 * [ist..iend] x [jst..jend] x [1..nz0-2], returned through sum[5].
 * The parameters shadow the file-scope globals of the same names.
 * Each thread accumulates private partial sums (sum0..sum4) over its
 * share of the "omp for" iterations and folds them into sum[] inside a
 * critical section; a "single" zeroes sum[] first and a second "single"
 * applies the final divide-and-sqrt.
 * NOTE(review): both singles carry "nowait", so nothing in this function
 * orders the zeroing of sum[] before other threads' critical updates, nor
 * the updates before the final sqrt — presumably the caller supplies the
 * required barriers; verify at the call sites.
 */
static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double sum[5]) {
  int i;
  int j;
  int k;
  int m;
  /* per-thread partial sums, one per solution component */
  double sum0 = 0.0;
  double sum1 = 0.0;
  double sum2 = 0.0;
  double sum3 = 0.0;
  double sum4 = 0.0;
#pragma omp single nowait
  {
    for (m = 0; m < 5; m++) {
      sum[m] = 0.0;
    }
  }
#pragma omp for nowait
  for (i = ist; i <= iend; i++) {
/*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz0 - 2; k++) { /*[]*/ /*[]*/ sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0]; /*[]*/ sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1]; /*[]*/ sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2]; /*[]*/ sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3]; /*[]*/ sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4]; } } } /*[]*/ // #pragma omp dummyFlush CRITICAL_START /*[]*/ #pragma omp critical { /*[]*/ /*[]*/ sum[0] += sum0; /*[]*/ sum[1] += sum1; /*[]*/ sum[2] += sum2; /*[]*/ sum[3] += sum3; /*[]*/ sum[4] += sum4; } /*[]*/ // #pragma omp dummyFlush CRITICAL_END /*[]*/ #pragma omp single nowait { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ double _imopVarPre154; /*[]*/ double _imopVarPre155; /*[]*/ _imopVarPre154 = sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[]*/ /*[]*/ sum[m] = _imopVarPre155; } } } /*[]*/ static void pintgr() { /*[]*/ /*[]*/ int i; /*[]*/ int j; /*[]*/ int k; /*[]*/ int ibeg; /*[]*/ int ifin; /*[]*/ int ifin1; /*[]*/ int jbeg; /*[]*/ int jfin; /*[]*/ int jfin1; /*[]*/ int iglob; /*[]*/ int iglob1; /*[]*/ int iglob2; /*[]*/ int jglob; /*[]*/ int jglob1; /*[]*/ int jglob2; /*[]*/ double phi1[12 + 2][12 + 2]; /*[]*/ double phi2[12 + 2][12 + 2]; /*[]*/ double frc1; /*[]*/ double frc2; /*[]*/ double frc3; /*[]*/ ibeg = nx; /*[]*/ ifin = 0; /*[]*/ iglob1 = -1; /*[]*/ iglob2 = nx - 1; /*[]*/ int _imopVarPre157; /*[]*/ _imopVarPre157 = iglob1 >= ii1; /*[]*/ /*[]*/ if (_imopVarPre157) { /*[]*/ /*[]*/ _imopVarPre157 = iglob2 < ii2 + nx; } /*[]*/ /*[]*/ if (_imopVarPre157) { /*[]*/ /*[]*/ ibeg = 0; } /*[]*/ int _imopVarPre159; /*[]*/ _imopVarPre159 = iglob1 >= ii1 - nx; /*[]*/ /*[]*/ if (_imopVarPre159) { /*[]*/ /*[]*/ _imopVarPre159 = iglob2 <= ii2; } /*[]*/ /*[]*/ if (_imopVarPre159) { /*[]*/ /*[]*/ ifin = nx; } /*[]*/ int _imopVarPre161; /*[]*/ _imopVarPre161 = ii1 >= iglob1; /*[]*/ /*[]*/ if (_imopVarPre161) 
/* pintgr (interior, code kept byte-identical): finish clamping the
   integration window [ibeg..ifin] x [jbeg..jfin] to the configured
   sub-domain [ii1..ii2] x [ji1..ji2] (the _imopVarPre* ints are
   tool-flattened short-circuit && conditions), zero the phi1/phi2
   scratch planes, sample 0.4*(E - 0.5*|momentum|^2/rho) on the k=ki1
   and k=ki2 surfaces, and accumulate the 8-point trapezoid sums into
   frc1 (scaled by dxi*deta) and start frc2 (scaled by dxi*dzeta).
   NOTE(review): physical lines in this file are collapsed; do not
   re-flow them without consulting the original NPB LU source. */
{ /*[]*/ /*[]*/ _imopVarPre161 = ii1 <= iglob2; } /*[]*/ /*[]*/ if (_imopVarPre161) { /*[]*/ /*[]*/ ibeg = ii1; } /*[]*/ int _imopVarPre163; /*[]*/ _imopVarPre163 = ii2 >= iglob1; /*[]*/ /*[]*/ if (_imopVarPre163) { /*[]*/ /*[]*/ _imopVarPre163 = ii2 <= iglob2; } /*[]*/ /*[]*/ if (_imopVarPre163) { /*[]*/ /*[]*/ ifin = ii2; } /*[]*/ jbeg = ny; /*[]*/ jfin = -1; /*[]*/ jglob1 = 0; /*[]*/ jglob2 = ny - 1; /*[]*/ int _imopVarPre165; /*[]*/ _imopVarPre165 = jglob1 >= ji1; /*[]*/ /*[]*/ if (_imopVarPre165) { /*[]*/ /*[]*/ _imopVarPre165 = jglob2 < ji2 + ny; } /*[]*/ /*[]*/ if (_imopVarPre165) { /*[]*/ /*[]*/ jbeg = 0; } /*[]*/ int _imopVarPre167; /*[]*/ _imopVarPre167 = jglob1 > ji1 - ny; /*[]*/ /*[]*/ if (_imopVarPre167) { /*[]*/ /*[]*/ _imopVarPre167 = jglob2 <= ji2; } /*[]*/ /*[]*/ if (_imopVarPre167) { /*[]*/ /*[]*/ jfin = ny; } /*[]*/ int _imopVarPre169; /*[]*/ _imopVarPre169 = ji1 >= jglob1; /*[]*/ /*[]*/ if (_imopVarPre169) { /*[]*/ /*[]*/ _imopVarPre169 = ji1 <= jglob2; } /*[]*/ /*[]*/ if (_imopVarPre169) { /*[]*/ /*[]*/ jbeg = ji1; } /*[]*/ int _imopVarPre171; /*[]*/ _imopVarPre171 = ji2 >= jglob1; /*[]*/ /*[]*/ if (_imopVarPre171) { /*[]*/ /*[]*/ _imopVarPre171 = ji2 <= jglob2; } /*[]*/ /*[]*/ if (_imopVarPre171) { /*[]*/ /*[]*/ jfin = ji2; } /*[]*/ ifin1 = ifin; /*[]*/ jfin1 = jfin; /*[]*/ /*[]*/ if (ifin1 == ii2) { /*[]*/ /*[]*/ ifin1 = ifin - 1; } /*[]*/ /*[]*/ if (jfin1 == ji2) { /*[]*/ /*[]*/ jfin1 = jfin - 1; } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = 0; i <= 12 + 1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 0; k <= 12 + 1; k++) { /*[]*/ /*[]*/ phi1[i][k] = 0.0; /*[]*/ phi2[i][k] = 0.0; } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ibeg; i <= ifin; i++) { /*[]*/ /*[]*/ iglob = i; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jbeg; j <= jfin; j++) { /*[]*/ /*[]*/ jglob = j; /*[]*/ k = ki1; /*[]*/ phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / 
u[i][j][k][0]); /*[]*/ k = ki2; /*[]*/ phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]); } } /*[]*/ frc1 = 0.0; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ibeg; i <= ifin1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jbeg; j <= jfin1; j++) { /*[]*/ /*[]*/ frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]); } } /*[]*/ frc1 = dxi * deta * frc1; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = 0; i <= 12 + 1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 0; k <= 12 + 1; k++) { /*[]*/ /*[]*/ phi1[i][k] = 0.0; /*[]*/ phi2[i][k] = 0.0; } } /*[]*/ jglob = jbeg; /*[]*/ /*[]*/ if (jglob == ji1) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ibeg; i <= ifin; i++) { /*[]*/ /*[]*/ iglob = i; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2; k++) { /*[]*/ /*[]*/ phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (((u[i][jbeg][k][1]) * (u[i][jbeg][k][1])) + ((u[i][jbeg][k][2]) * (u[i][jbeg][k][2])) + ((u[i][jbeg][k][3]) * (u[i][jbeg][k][3]))) / u[i][jbeg][k][0]); } } } /*[]*/ jglob = jfin; /*[]*/ /*[]*/ if (jglob == ji2) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ibeg; i <= ifin; i++) { /*[]*/ /*[]*/ iglob = i; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2; k++) { /*[]*/ /*[]*/ phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (((u[i][jfin][k][1]) * (u[i][jfin][k][1])) + ((u[i][jfin][k][2]) * (u[i][jfin][k][2])) + ((u[i][jfin][k][3]) * (u[i][jfin][k][3]))) / u[i][jfin][k][0]); } } } /*[]*/ frc2 = 0.0; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ibeg; i <= ifin1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2 - 1; k++) { /*[]*/ /*[]*/ frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]); } } /*[]*/ frc2 = dxi * dzeta * frc2; /*[]*/ /*[]*/ /*[]*/ 
/*[]*/ for (i = 0; i <= 12 + 1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 0; k <= 12 + 1; k++) { /*[]*/ /*[]*/ phi1[i][k] = 0.0; /*[]*/ phi2[i][k] = 0.0; } } /*[]*/ iglob = ibeg; /*[]*/ /*[]*/ if (iglob == ii1) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jbeg; j <= jfin; j++) { /*[]*/ /*[]*/ jglob = j; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2; k++) { /*[]*/ /*[]*/ phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (((u[ibeg][j][k][1]) * (u[ibeg][j][k][1])) + ((u[ibeg][j][k][2]) * (u[ibeg][j][k][2])) + ((u[ibeg][j][k][3]) * (u[ibeg][j][k][3]))) / u[ibeg][j][k][0]); } } } /*[]*/ iglob = ifin; /*[]*/ /*[]*/ if (iglob == ii2) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jbeg; j <= jfin; j++) { /*[]*/ /*[]*/ jglob = j; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2; k++) { /*[]*/ /*[]*/ phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (((u[ifin][j][k][1]) * (u[ifin][j][k][1])) + ((u[ifin][j][k][2]) * (u[ifin][j][k][2])) + ((u[ifin][j][k][3]) * (u[ifin][j][k][3]))) / u[ifin][j][k][0]); } } } /*[]*/ frc3 = 0.0; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jbeg; j <= jfin1; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = ki1; k <= ki2 - 1; k++) { /*[]*/ /*[]*/ frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } /*[]*/ frc3 = deta * dzeta * frc3; /*[]*/ frc = 0.25 * (frc1 + frc2 + frc3); } /*[]*/ static void read_input() { /*[]*/ /*[]*/ FILE *fp; /*[]*/ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - LU Benchmark\n\n"); /*[]*/ /*[]*/ fp = fopen("inputlu.data", "r"); /*[]*/ /*[]*/ /*[]*/ if (fp != ((void *) 0)) { /*[]*/ /*[]*/ printf(" Reading from input file inputlu.data\n"); /*[]*/ /*[]*/ int _imopVarPre173; /*[]*/ _imopVarPre173 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre173 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre173 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre175; /*[]*/ _imopVarPre175 = fgetc(fp); /*[]*/ 
/* read_input (interior, code kept byte-identical): parse inputlu.data.
   Each "while (c != '\n') c = fgetc(fp);" loop skips one header/comment
   line of the input file; the fscanf calls then read, in order:
   ipr and inorm, itmax, dt, omega, tolrsd[0..4], and (below) nx0/ny0/nz0.
   NOTE(review): fgetc/fscanf return values are unchecked - a truncated
   input file would spin forever on EOF in the skip loops; confirm the
   expected inputlu.data layout before hardening. */
/*[]*/ /*[]*/ while (_imopVarPre175 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre175 = fgetc(fp); /*[]*/ } /*[]*/ int *_imopVarPre178; /*[]*/ int *_imopVarPre179; /*[]*/ _imopVarPre178 = &inorm; /*[]*/ _imopVarPre179 = &ipr; /*[]*/ fscanf(fp, "%d%d", _imopVarPre179, _imopVarPre178); /*[]*/ /*[]*/ int _imopVarPre181; /*[]*/ _imopVarPre181 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre181 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre181 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre183; /*[]*/ _imopVarPre183 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre183 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre183 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre185; /*[]*/ _imopVarPre185 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre185 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre185 = fgetc(fp); /*[]*/ } /*[]*/ int *_imopVarPre187; /*[]*/ _imopVarPre187 = &itmax; /*[]*/ fscanf(fp, "%d", _imopVarPre187); /*[]*/ /*[]*/ int _imopVarPre189; /*[]*/ _imopVarPre189 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre189 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre189 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre191; /*[]*/ _imopVarPre191 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre191 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre191 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre193; /*[]*/ _imopVarPre193 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre193 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre193 = fgetc(fp); /*[]*/ } /*[]*/ double *_imopVarPre195; /*[]*/ _imopVarPre195 = &dt; /*[]*/ fscanf(fp, "%lf", _imopVarPre195); /*[]*/ /*[]*/ int _imopVarPre197; /*[]*/ _imopVarPre197 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre197 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre197 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre199; /*[]*/ _imopVarPre199 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre199 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre199 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre201; /*[]*/ _imopVarPre201 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ 
while (_imopVarPre201 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre201 = fgetc(fp); /*[]*/ } /*[]*/ double *_imopVarPre203; /*[]*/ _imopVarPre203 = &omega; /*[]*/ fscanf(fp, "%lf", _imopVarPre203); /*[]*/ /*[]*/ int _imopVarPre205; /*[]*/ _imopVarPre205 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre205 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre205 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre207; /*[]*/ _imopVarPre207 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre207 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre207 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre209; /*[]*/ _imopVarPre209 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre209 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre209 = fgetc(fp); /*[]*/ } /*[]*/ double *_imopVarPre215; /*[]*/ double *_imopVarPre216; /*[]*/ double *_imopVarPre217; /*[]*/ double *_imopVarPre218; /*[]*/ double *_imopVarPre219; /*[]*/ _imopVarPre215 = &tolrsd[4]; /*[]*/ _imopVarPre216 = &tolrsd[3]; /*[]*/ _imopVarPre217 = &tolrsd[2]; /*[]*/ _imopVarPre218 = &tolrsd[1]; /*[]*/ _imopVarPre219 = &tolrsd[0]; /*[]*/ fscanf(fp, "%lf%lf%lf%lf%lf", _imopVarPre219, _imopVarPre218, _imopVarPre217, _imopVarPre216, _imopVarPre215); /*[]*/ /*[]*/ int _imopVarPre221; /*[]*/ _imopVarPre221 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre221 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre221 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre223; /*[]*/ _imopVarPre223 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre223 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre223 = fgetc(fp); /*[]*/ } /*[]*/ int _imopVarPre225; /*[]*/ _imopVarPre225 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre225 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre225 = fgetc(fp); /*[]*/ } /*[]*/ int *_imopVarPre229; /*[]*/ int *_imopVarPre230; /*[]*/ int *_imopVarPre231; /*[]*/ _imopVarPre229 = &nz0; /*[]*/ _imopVarPre230 = &ny0; /*[]*/ _imopVarPre231 = &nx0; /*[]*/ fscanf(fp, "%d%d%d", _imopVarPre231, _imopVarPre230, _imopVarPre229); /*[]*/ /*[]*/ int 
_imopVarPre233; /*[]*/ _imopVarPre233 = fgetc(fp); /*[]*/ /*[]*/ /*[]*/ while (_imopVarPre233 != '\n') { /*[]*/ /*[]*/ ; /*[]*/ _imopVarPre233 = fgetc(fp); /*[]*/ } /*[]*/ fclose(fp); /*[]*/ } else { /*[]*/ /*[]*/ ipr = 1; /*[]*/ inorm = 50; /*[]*/ itmax = 50; /*[]*/ dt = 0.5; /*[]*/ omega = 1.2; /*[]*/ tolrsd[0] = 1.0e-8; /*[]*/ tolrsd[1] = 1.0e-8; /*[]*/ tolrsd[2] = 1.0e-8; /*[]*/ tolrsd[3] = 1.0e-8; /*[]*/ tolrsd[4] = 1.0e-8; /*[]*/ nx0 = 12; /*[]*/ ny0 = 12; /*[]*/ nz0 = 12; } /*[]*/ int _imopVarPre234; /*[]*/ int _imopVarPre235; /*[]*/ _imopVarPre234 = nx0 < 4; /*[]*/ /*[]*/ if (!_imopVarPre234) { /*[]*/ /*[]*/ _imopVarPre235 = ny0 < 4; /*[]*/ /*[]*/ if (!_imopVarPre235) { /*[]*/ /*[]*/ _imopVarPre235 = nz0 < 4; } /*[]*/ _imopVarPre234 = _imopVarPre235; } /*[]*/ /*[]*/ if (_imopVarPre234) { /*[]*/ /*[]*/ printf(" PROBLEM SIZE IS TOO SMALL - \n" " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); /*[]*/ /*[]*/ exit(1); /*[]*/ } /*[]*/ int _imopVarPre236; /*[]*/ int _imopVarPre237; /*[]*/ _imopVarPre236 = nx0 > 12; /*[]*/ /*[]*/ if (!_imopVarPre236) { /*[]*/ /*[]*/ _imopVarPre237 = ny0 > 12; /*[]*/ /*[]*/ if (!_imopVarPre237) { /*[]*/ /*[]*/ _imopVarPre237 = nz0 > 12; } /*[]*/ _imopVarPre236 = _imopVarPre237; } /*[]*/ /*[]*/ if (_imopVarPre236) { /*[]*/ /*[]*/ printf(" PROBLEM SIZE IS TOO LARGE - \n" " NX, NY AND NZ SHOULD BE EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); /*[]*/ /*[]*/ exit(1); /*[]*/ } /*[]*/ printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0); /*[]*/ /*[]*/ printf(" Iterations: %3d\n", itmax); /*[]*/ } /*[]*/ static void rhs() { /*[]*/ /*[]*/ int i; /*[]*/ int j; /*[]*/ int k; /*[]*/ int m; /*[]*/ int L1; /*[]*/ int L2; /*[]*/ int ist1; /*[]*/ int iend1; /*[]*/ int jst1; /*[]*/ int jend1; /*[]*/ double q; /*[]*/ double u21; /*[]*/ double u31; /*[]*/ double u41; /*[]*/ double tmp; /*[]*/ double u21i; /*[]*/ double u31i; /*[]*/ double u41i; /*[]*/ double u51i; /*[]*/ double u21j; /*[]*/ double u31j; /*[]*/ double u41j; /*[]*/ double u51j; 
/*[]*/ double u21k; /*[]*/ double u31k; /*[]*/ double u41k; /*[]*/ double u51k; /*[]*/ double u21im1; /*[]*/ double u31im1; /*[]*/ double u41im1; /*[]*/ double u51im1; /*[]*/ double u21jm1; /*[]*/ double u31jm1; /*[]*/ double u41jm1; /*[]*/ double u51jm1; /*[]*/ double u21km1; /*[]*/ double u31km1; /*[]*/ double u41km1; /*[]*/ double u51km1; /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (i = 0; i <= nx - 1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = 0; j <= ny - 1; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 0; k <= nz - 1; k++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /*[]*/ L1 = 0; /*[]*/ L2 = nx - 1; /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (i = L1; i <= L2; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ flux[i][j][k][0] = u[i][j][k][1]; /*[]*/ u21 = u[i][j][k][1] / u[i][j][k][0]; /*[]*/ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /*[]*/ flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); /*[]*/ flux[i][j][k][2] = u[i][j][k][2] * u21; /*[]*/ flux[i][j][k][3] = u[i][j][k][3] * u21; /*[]*/ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= iend; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /*[]*/ L2 = nx - 1; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= L2; i++) { /*[]*/ /*[]*/ tmp = 1.0 / u[i][j][k][0]; /*[]*/ u21i = tmp * u[i][j][k][1]; /*[]*/ u31i = tmp * u[i][j][k][2]; /*[]*/ u41i = tmp 
* u[i][j][k][3]; /*[]*/ u51i = tmp * u[i][j][k][4]; /*[]*/ tmp = 1.0 / u[i - 1][j][k][0]; /*[]*/ u21im1 = tmp * u[i - 1][j][k][1]; /*[]*/ u31im1 = tmp * u[i - 1][j][k][2]; /*[]*/ u41im1 = tmp * u[i - 1][j][k][3]; /*[]*/ u51im1 = tmp * u[i - 1][j][k][4]; /*[]*/ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[]*/ flux[i][j][k][2] = tx3 * (u31i - u31im1); /*[]*/ flux[i][j][k][3] = tx3 * (u41i - u41im1); /*[]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= iend; i++) { /*[]*/ /*[]*/ rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); /*[]*/ rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); /*[]*/ rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); /*[]*/ rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); /*[]*/ rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); /*[]*/ rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } /*[]*/ ist1 = 3; /*[]*/ iend1 = nx - 4; 
/*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = ist1; i <= iend1; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); /*[]*/ rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /*[]*/ L1 = 0; /*[]*/ L2 = ny - 1; /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= iend; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = L1; j <= L2; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ flux[i][j][k][0] = u[i][j][k][2]; /*[]*/ u31 = u[i][j][k][2] / u[i][j][k][0]; /*[]*/ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /*[]*/ flux[i][j][k][1] = u[i][j][k][1] * u31; /*[]*/ flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); /*[]*/ flux[i][j][k][3] = u[i][j][k][3] * u31; /*[]*/ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= iend; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /*[]*/ L2 = ny - 1; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= L2; j++) { /*[]*/ /*[]*/ tmp = 1.0 / u[i][j][k][0]; /*[]*/ u21j = tmp * u[i][j][k][1]; /*[]*/ u31j = tmp * u[i][j][k][2]; /*[]*/ u41j = tmp * 
u[i][j][k][3]; /*[]*/ u51j = tmp * u[i][j][k][4]; /*[]*/ tmp = 1.0 / u[i][j - 1][k][0]; /*[]*/ u21jm1 = tmp * u[i][j - 1][k][1]; /*[]*/ u31jm1 = tmp * u[i][j - 1][k][2]; /*[]*/ u41jm1 = tmp * u[i][j - 1][k][3]; /*[]*/ u51jm1 = tmp * u[i][j - 1][k][4]; /*[]*/ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /*[]*/ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[]*/ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /*[]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); /*[]*/ rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); /*[]*/ rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); /*[]*/ rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); /*[]*/ rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); /*[]*/ rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } /*[]*/ jst1 = 3; /*[]*/ jend1 = ny - 4; 
/*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst1; j <= jend1; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); /*[]*/ rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /*[]*/ #pragma omp for nowait /*[]*/ /*[]*/ /*[]*/ for (i = ist; i <= iend; i++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (j = jst; j <= jend; j++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 0; k <= nz - 1; k++) { /*[]*/ /*[]*/ flux[i][j][k][0] = u[i][j][k][3]; /*[]*/ u41 = u[i][j][k][3] / u[i][j][k][0]; /*[]*/ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /*[]*/ flux[i][j][k][1] = u[i][j][k][1] * u41; /*[]*/ flux[i][j][k][2] = u[i][j][k][2] * u41; /*[]*/ flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); /*[]*/ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 1; k++) { /*[]*/ /*[]*/ tmp = 1.0 / u[i][j][k][0]; /*[]*/ u21k = tmp * u[i][j][k][1]; /*[]*/ u31k = tmp * u[i][j][k][2]; /*[]*/ u41k = tmp * u[i][j][k][3]; /*[]*/ u51k = tmp * u[i][j][k][4]; /*[]*/ tmp = 1.0 / u[i][j][k - 1][0]; /*[]*/ u21km1 = tmp * u[i][j][k - 1][1]; /*[]*/ u31km1 = tmp * u[i][j][k - 1][2]; /*[]*/ u41km1 = tmp * u[i][j][k - 1][3]; /*[]*/ u51km1 
= tmp * u[i][j][k - 1][4]; /*[]*/ flux[i][j][k][1] = tz3 * (u21k - u21km1); /*[]*/ flux[i][j][k][2] = tz3 * (u31k - u31km1); /*[]*/ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[]*/ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 1; k <= nz - 2; k++) { /*[]*/ /*[]*/ rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); /*[]*/ rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); /*[]*/ rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); /*[]*/ rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); /*[]*/ rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); /*[]*/ rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (k = 3; k <= nz - 4; k++) { /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 
1][m] + u[i][j][k + 2][m]); } } /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (m = 0; m < 5; m++) { /*[]*/ /*[]*/ rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); /*[]*/ rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } /*[]*/ static void setbv() { /*[]*/ /*[49]*/ #pragma omp parallel { /*[49]*/ /*[49]*/ int i; /*[49]*/ int j; /*[49]*/ int k; /*[49]*/ int iglob; /*[49]*/ int jglob; /*[49]*/ #pragma omp for nowait /*[49]*/ /*[49]*/ /*[49]*/ for (i = 0; i < nx; i++) { /*[49]*/ /*[49]*/ iglob = i; /*[49]*/ /*[49]*/ /*[49]*/ /*[49]*/ for (j = 0; j < ny; j++) { /*[49]*/ /*[49]*/ jglob = j; /*[49]*/ double *_imopVarPre239; /*[49]*/ _imopVarPre239 = &u[i][j][0][0]; /*[49]*/ exact(iglob, jglob, 0, _imopVarPre239); /*[49]*/ /*[49]*/ double *_imopVarPre242; /*[49]*/ int _imopVarPre243; /*[49]*/ _imopVarPre242 = &u[i][j][nz - 1][0]; /*[49]*/ _imopVarPre243 = nz - 1; /*[49]*/ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /*[49]*/ } } /*[49]*/ // #pragma omp dummyFlush BARRIER_START /*[49]*/ #pragma omp barrier /*[50]*/ #pragma omp for nowait /*[50]*/ /*[50]*/ /*[50]*/ for (i = 0; i < nx; i++) { /*[50]*/ /*[50]*/ iglob = i; /*[50]*/ /*[50]*/ /*[50]*/ /*[50]*/ for (k = 0; k < nz; k++) { /*[50]*/ /*[50]*/ double *_imopVarPre245; /*[50]*/ _imopVarPre245 = &u[i][0][k][0]; /*[50]*/ exact(iglob, 0, k, _imopVarPre245); /*[50]*/ } } /*[50]*/ // #pragma omp dummyFlush BARRIER_START /*[50]*/ #pragma omp barrier /*[51]*/ #pragma omp for nowait /*[51]*/ /*[51]*/ /*[51]*/ for (i = 0; i < nx; i++) { /*[51]*/ /*[51]*/ iglob = i; /*[51]*/ /*[51]*/ /*[51]*/ /*[51]*/ for (k = 0; k < nz; k++) { /*[51]*/ /*[51]*/ double *_imopVarPre248; /*[51]*/ int _imopVarPre249; /*[51]*/ _imopVarPre248 = &u[i][ny - 1][k][0]; /*[51]*/ _imopVarPre249 = ny0 - 1; /*[51]*/ exact(iglob, _imopVarPre249, k, _imopVarPre248); /*[51]*/ } 
} /*[51]*/ // #pragma omp dummyFlush BARRIER_START /*[51]*/ #pragma omp barrier /*[52]*/ #pragma omp for nowait /*[52]*/ /*[52]*/ /*[52]*/ for (j = 0; j < ny; j++) { /*[52]*/ /*[52]*/ jglob = j; /*[52]*/ /*[52]*/ /*[52]*/ /*[52]*/ for (k = 0; k < nz; k++) { /*[52]*/ /*[52]*/ double *_imopVarPre251; /*[52]*/ _imopVarPre251 = &u[0][j][k][0]; /*[52]*/ exact(0, jglob, k, _imopVarPre251); /*[52]*/ } } /*[52]*/ // #pragma omp dummyFlush BARRIER_START /*[52]*/ #pragma omp barrier /*[53]*/ #pragma omp for nowait /*[53]*/ /*[53]*/ /*[53]*/ for (j = 0; j < ny; j++) { /*[53]*/ /*[53]*/ jglob = j; /*[53]*/ /*[53]*/ /*[53]*/ /*[53]*/ for (k = 0; k < nz; k++) { /*[53]*/ /*[53]*/ double *_imopVarPre254; /*[53]*/ int _imopVarPre255; /*[53]*/ _imopVarPre254 = &u[nx - 1][j][k][0]; /*[53]*/ _imopVarPre255 = nx0 - 1; /*[53]*/ exact(_imopVarPre255, jglob, k, _imopVarPre254); /*[53]*/ } } } } /*[]*/ static void setcoeff() { /*[]*/ /*[]*/ dxi = 1.0 / (nx0 - 1); /*[]*/ deta = 1.0 / (ny0 - 1); /*[]*/ dzeta = 1.0 / (nz0 - 1); /*[]*/ tx1 = 1.0 / (dxi * dxi); /*[]*/ tx2 = 1.0 / (2.0 * dxi); /*[]*/ tx3 = 1.0 / dxi; /*[]*/ ty1 = 1.0 / (deta * deta); /*[]*/ ty2 = 1.0 / (2.0 * deta); /*[]*/ ty3 = 1.0 / deta; /*[]*/ tz1 = 1.0 / (dzeta * dzeta); /*[]*/ tz2 = 1.0 / (2.0 * dzeta); /*[]*/ tz3 = 1.0 / dzeta; /*[]*/ ii1 = 1; /*[]*/ ii2 = nx0 - 2; /*[]*/ ji1 = 1; /*[]*/ ji2 = ny0 - 3; /*[]*/ ki1 = 2; /*[]*/ ki2 = nz0 - 2; /*[]*/ dx1 = 0.75; /*[]*/ dx2 = dx1; /*[]*/ dx3 = dx1; /*[]*/ dx4 = dx1; /*[]*/ dx5 = dx1; /*[]*/ dy1 = 0.75; /*[]*/ dy2 = dy1; /*[]*/ dy3 = dy1; /*[]*/ dy4 = dy1; /*[]*/ dy5 = dy1; /*[]*/ dz1 = 1.00; /*[]*/ dz2 = dz1; /*[]*/ dz3 = dz1; /*[]*/ dz4 = dz1; /*[]*/ dz5 = dz1; /*[]*/ int _imopVarPre348; /*[]*/ double _imopVarPre349; /*[]*/ int _imopVarPre350; /*[]*/ double _imopVarPre351; /*[]*/ int _imopVarPre358; /*[]*/ double _imopVarPre359; /*[]*/ _imopVarPre348 = (dy1 > dz1); /*[]*/ /*[]*/ if (_imopVarPre348) { /*[]*/ /*[]*/ _imopVarPre349 = dy1; } else { /*[]*/ /*[]*/ _imopVarPre349 = 
/* setcoeff (interior, code kept byte-identical): the flattened ternary
   chain completes dssp = max(dx1, max(dy1, dz1)) / 4.0 (fourth-order
   dissipation coefficient), then fills the ce[5][13] coefficient table
   consumed by the exact-solution routine. Values are literal constants
   from the NPB LU specification - do not "normalize" their spelling. */
dz1; } /*[]*/ _imopVarPre350 = (dx1 > _imopVarPre349); /*[]*/ /*[]*/ if (_imopVarPre350) { /*[]*/ /*[]*/ _imopVarPre351 = dx1; } else { /*[]*/ /*[]*/ _imopVarPre358 = (dy1 > dz1); /*[]*/ /*[]*/ if (_imopVarPre358) { /*[]*/ /*[]*/ _imopVarPre359 = dy1; } else { /*[]*/ /*[]*/ _imopVarPre359 = dz1; } /*[]*/ _imopVarPre351 = _imopVarPre359; } /*[]*/ dssp = _imopVarPre351 / 4.0; /*[]*/ ce[0][0] = 2.0; /*[]*/ ce[0][1] = 0.0; /*[]*/ ce[0][2] = 0.0; /*[]*/ ce[0][3] = 4.0; /*[]*/ ce[0][4] = 5.0; /*[]*/ ce[0][5] = 3.0; /*[]*/ ce[0][6] = 5.0e-01; /*[]*/ ce[0][7] = 2.0e-02; /*[]*/ ce[0][8] = 1.0e-02; /*[]*/ ce[0][9] = 3.0e-02; /*[]*/ ce[0][10] = 5.0e-01; /*[]*/ ce[0][11] = 4.0e-01; /*[]*/ ce[0][12] = 3.0e-01; /*[]*/ ce[1][0] = 1.0; /*[]*/ ce[1][1] = 0.0; /*[]*/ ce[1][2] = 0.0; /*[]*/ ce[1][3] = 0.0; /*[]*/ ce[1][4] = 1.0; /*[]*/ ce[1][5] = 2.0; /*[]*/ ce[1][6] = 3.0; /*[]*/ ce[1][7] = 1.0e-02; /*[]*/ ce[1][8] = 3.0e-02; /*[]*/ ce[1][9] = 2.0e-02; /*[]*/ ce[1][10] = 4.0e-01; /*[]*/ ce[1][11] = 3.0e-01; /*[]*/ ce[1][12] = 5.0e-01; /*[]*/ ce[2][0] = 2.0; /*[]*/ ce[2][1] = 2.0; /*[]*/ ce[2][2] = 0.0; /*[]*/ ce[2][3] = 0.0; /*[]*/ ce[2][4] = 0.0; /*[]*/ ce[2][5] = 2.0; /*[]*/ ce[2][6] = 3.0; /*[]*/ ce[2][7] = 4.0e-02; /*[]*/ ce[2][8] = 3.0e-02; /*[]*/ ce[2][9] = 5.0e-02; /*[]*/ ce[2][10] = 3.0e-01; /*[]*/ ce[2][11] = 5.0e-01; /*[]*/ ce[2][12] = 4.0e-01; /*[]*/ ce[3][0] = 2.0; /*[]*/ ce[3][1] = 2.0; /*[]*/ ce[3][2] = 0.0; /*[]*/ ce[3][3] = 0.0; /*[]*/ ce[3][4] = 0.0; /*[]*/ ce[3][5] = 2.0; /*[]*/ ce[3][6] = 3.0; /*[]*/ ce[3][7] = 3.0e-02; /*[]*/ ce[3][8] = 5.0e-02; /*[]*/ ce[3][9] = 4.0e-02; /*[]*/ ce[3][10] = 2.0e-01; /*[]*/ ce[3][11] = 1.0e-01; /*[]*/ ce[3][12] = 3.0e-01; /*[]*/ ce[4][0] = 5.0; /*[]*/ ce[4][1] = 4.0; /*[]*/ ce[4][2] = 3.0; /*[]*/ ce[4][3] = 2.0; /*[]*/ ce[4][4] = 1.0e-01; /*[]*/ ce[4][5] = 4.0e-01; /*[]*/ ce[4][6] = 3.0e-01; /*[]*/ ce[4][7] = 5.0e-02; /*[]*/ ce[4][8] = 4.0e-02; /*[]*/ ce[4][9] = 3.0e-02; /*[]*/ ce[4][10] = 1.0e-01; /*[]*/ ce[4][11] = 3.0e-01; 
/* Tail of the preceding coefficient-setup function (its start lies outside
 * this chunk): the final ce[][] entry is assigned and the function closes. */
/*[]*/ ce[4][12] = 2.0e-01; }

/*
 * setiv - set the initial values of the dependent-variable array u at the
 * interior grid points.
 *
 * For each interior point, exact() is evaluated on the six domain faces
 * (i = 0, i = nx0-1, j = 0, j = ny0-1, k = 0, k = nz-1) and the face values
 * are blended: pxi, peta and pzeta are the one-dimensional interpolants
 * along the xi, eta and zeta directions, combined in the final u[][][][]
 * assignment.  Points on the i/j boundaries are skipped by the guard tests
 * below.  The outer j loop is work-shared across the OpenMP team; "nowait"
 * is safe because the loop is the last construct in the parallel region.
 *
 * NOTE(review): the _imopVarPre* temporaries look like mechanical
 * expansions of short-circuit && conditions produced by an instrumenting
 * source-to-source tool -- confirm against the original NPB LU source.
 */
/*[]*/ static void setiv() {
    /*[]*/ /*[54]*/
#pragma omp parallel
    {
        /* Per-thread locals (declared inside the region, hence private). */
        /*[54]*/ /*[54]*/ int i;
        /*[54]*/ int j;
        /*[54]*/ int k;
        /*[54]*/ int m;
        /*[54]*/ int iglob;  /* global i index (equals i in this build) */
        /*[54]*/ int jglob;  /* global j index (equals j in this build) */
        /*[54]*/ double xi;
        /*[54]*/ double eta;
        /*[54]*/ double zeta;
        /*[54]*/ double pxi;
        /*[54]*/ double peta;
        /*[54]*/ double pzeta;
        /* Exact-solution values on the six faces of the domain. */
        /*[54]*/ double ue_1jk[5];
        /*[54]*/ double ue_nx0jk[5];
        /*[54]*/ double ue_i1k[5];
        /*[54]*/ double ue_iny0k[5];
        /*[54]*/ double ue_ij1[5];
        /*[54]*/ double ue_ijnz[5];
        /*[54]*/
#pragma omp for nowait
        /*[54]*/ /*[54]*/ /*[54]*/
        for (j = 0; j < ny; j++) {
            /*[54]*/ /*[54]*/ jglob = j;
            /*[54]*/ /*[54]*/ /*[54]*/ /*[54]*/
            for (k = 1; k < nz - 1; k++) {
                /*[54]*/ /*[54]*/ zeta = ((double) k) / (nz - 1);
                /* Guard: skip j boundaries (jglob != 0 && jglob != ny0-1). */
                /*[54]*/ int _imopVarPre361;
                /*[54]*/ _imopVarPre361 = jglob != 0;
                /*[54]*/ /*[54]*/ if (_imopVarPre361) {
                    /*[54]*/ /*[54]*/ _imopVarPre361 = jglob != ny0 - 1;
                }
                /*[54]*/ /*[54]*/ if (_imopVarPre361) {
                    /*[54]*/ /*[54]*/ eta = ((double) jglob) / (ny0 - 1);
                    /*[54]*/ /*[54]*/ /*[54]*/ /*[54]*/
                    for (i = 0; i < nx; i++) {
                        /*[54]*/ /*[54]*/ iglob = i;
                        /* Guard: skip i boundaries (iglob != 0 && iglob != nx0-1). */
                        /*[54]*/ int _imopVarPre363;
                        /*[54]*/ _imopVarPre363 = iglob != 0;
                        /*[54]*/ /*[54]*/ if (_imopVarPre363) {
                            /*[54]*/ /*[54]*/ _imopVarPre363 = iglob != nx0 - 1;
                        }
                        /*[54]*/ /*[54]*/ if (_imopVarPre363) {
                            /*[54]*/ /*[54]*/ xi = ((double) iglob) / (nx0 - 1);
                            /* Evaluate the exact solution on the six faces. */
                            /*[54]*/ exact(0, jglob, k, ue_1jk);
                            /*[54]*/ /*[54]*/ int _imopVarPre365;
                            /*[54]*/ _imopVarPre365 = nx0 - 1;
                            /*[54]*/ exact(_imopVarPre365, jglob, k, ue_nx0jk);
                            /*[54]*/ /*[54]*/ exact(iglob, 0, k, ue_i1k);
                            /*[54]*/ /*[54]*/ int _imopVarPre367;
                            /*[54]*/ _imopVarPre367 = ny0 - 1;
                            /*[54]*/ exact(iglob, _imopVarPre367, k, ue_iny0k);
                            /*[54]*/ /*[54]*/ exact(iglob, jglob, 0, ue_ij1);
                            /*[54]*/ /*[54]*/ int _imopVarPre369;
                            /*[54]*/ _imopVarPre369 = nz - 1;
                            /*[54]*/ exact(iglob, jglob, _imopVarPre369, ue_ijnz);
                            /*[54]*/ /*[54]*/ /*[54]*/ /*[54]*/ /*[54]*/
                            for (m = 0; m < 5; m++) {
                                /* Blend of the six face values for component m. */
                                /*[54]*/ /*[54]*/ pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
                                /*[54]*/ peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
                                /*[54]*/ pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
                                /*[54]*/ u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
                            }
                        }
                    }
                }
            }
        }
    }
}

/*
 * ssor - driver for the SSOR pseudo-time iteration (this definition
 * continues beyond the end of this chunk).  tmp = 1/(omega*(2-omega)) is
 * the relaxation factor later multiplied into the solution update
 * u += tmp * rsd.
 */
/*[]*/ static void ssor() {
    /*[]*/ /*[]*/ int i;
    /*[]*/ int j;
    /*[]*/ int k;
    /*[]*/ int m;
    /*[]*/ int istep;
    /*[]*/ double tmp;
    /*[]*/ double delunm[5];
    /*[]*/ double tv[12][12][5];
    /*[]*/ tmp = 1.0 / (omega * (2.0 - omega));
    /* Zero the block-system arrays a, b, c, d before the iteration. */
    /*[55]*/
#pragma omp parallel private(i, j, k, m)
    {
        /*[55]*/ /*[55]*/
#pragma omp for nowait
        /*[55]*/ /*[55]*/ /*[55]*/
        for (i = 0; i < 12; i++) {
            /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/
            for (j = 0; j < 12; j++) {
                /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/
                for (k = 0; k < 5; k++) {
                    /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/ /*[55]*/
                    for (m = 0; m < 5; m++) {
                        /*[55]*/ /*[55]*/ a[i][j][k][m] = 0.0;
                        /*[55]*/ b[i][j][k][m] = 0.0;
                        /*[55]*/ c[i][j][k][m] = 0.0;
                        /*[55]*/ d[i][j][k][m] = 0.0;
                    }
                }
            }
        }
    }
    /* Residual computation: rsd starts from -frct, then flux differences in
     * each coordinate direction are accumulated (continues past this chunk). */
    /*[56]*/
#pragma omp parallel
    {
        /*[56]*/ /*[56]*/ int i_imopVarPre84;
        /*[56]*/ int j_imopVarPre85;
        /*[56]*/ int k_imopVarPre86;
        /*[56]*/ int m_imopVarPre87;
        /*[56]*/ int L1;
        /*[56]*/ int L2;
        /*[56]*/ int ist1;
        /*[56]*/ int iend1;
        /*[56]*/ int jst1;
        /*[56]*/ int jend1;
        /*[56]*/ double q;
        /*[56]*/ double u21;
        /*[56]*/ double u31;
        /*[56]*/ double u41;
        /*[56]*/ double tmp_imopVarPre88;
        /*[56]*/ double u21i;
        /*[56]*/ double u31i;
        /*[56]*/ double u41i;
        /*[56]*/ double u51i;
        /*[56]*/ double u21j;
        /*[56]*/ double u31j;
        /*[56]*/ double u41j;
        /*[56]*/ double u51j;
        /*[56]*/ double u21k;
        /*[56]*/ double u31k;
        /*[56]*/ double u41k;
        /*[56]*/ double u51k;
        /*[56]*/ double u21im1;
        /*[56]*/ double u31im1;
        /*[56]*/ double u41im1;
        /*[56]*/ double u51im1;
        /*[56]*/ double u21jm1;
        /*[56]*/ double u31jm1;
        /*[56]*/ double u41jm1;
        /*[56]*/ double u51jm1;
        /*[56]*/ double u21km1;
        /*[56]*/ double u31km1;
        /*[56]*/ double u41km1;
        /*[56]*/ double u51km1;
        /*[56]*/
#pragma omp for nowait
        /*[56]*/ /*[56]*/ /*[56]*/
        for (i_imopVarPre84 = 0;
i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[56]*/ /*[56]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /*[56]*/ L1 = 0; /*[56]*/ L2 = nx - 1; /*[56]*/ #pragma omp for nowait /*[56]*/ /*[56]*/ /*[56]*/ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ /*[56]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[56]*/ /*[56]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[56]*/ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[56]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[56]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[56]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /*[56]*/ 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /*[56]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /*[56]*/ // #pragma omp dummyFlush BARRIER_START /*[56]*/ #pragma omp barrier /*[57]*/ #pragma omp for nowait /*[57]*/ /*[57]*/ /*[57]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[57]*/ /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /*[57]*/ L2 = nx - 1; /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /*[57]*/ /*[57]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[57]*/ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[57]*/ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[57]*/ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[57]*/ u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[57]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /*[57]*/ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /*[57]*/ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 
1][j_imopVarPre85][k_imopVarPre86][2]; /*[57]*/ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /*[57]*/ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /*[57]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[57]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /*[57]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /*[57]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[57]*/ /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]); } /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[57]*/ /*[57]*/ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /*[57]*/ rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /*[57]*/ ist1 = 3; 
/*[57]*/ iend1 = nx - 4; /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[57]*/ /*[57]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /*[57]*/ /*[57]*/ /*[57]*/ /*[57]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[57]*/ /*[57]*/ rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /*[57]*/ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /*[57]*/ // #pragma omp dummyFlush BARRIER_START /*[57]*/ #pragma omp barrier /*[58]*/ L1 = 0; /*[58]*/ L2 = ny - 1; /*[58]*/ #pragma omp for nowait /*[58]*/ /*[58]*/ /*[58]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[58]*/ /*[58]*/ /*[58]*/ /*[58]*/ /*[58]*/ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { 
/*[58]*/ /*[58]*/ /*[58]*/ /*[58]*/ /*[58]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[58]*/ /*[58]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[58]*/ u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[58]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[58]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /*[58]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[58]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /*[58]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /*[58]*/ // #pragma omp dummyFlush BARRIER_START /*[58]*/ #pragma omp barrier /*[59]*/ #pragma omp for nowait /*[59]*/ /*[59]*/ /*[59]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[59]*/ /*[59]*/ 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /*[59]*/ L2 = ny - 1; /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /*[59]*/ /*[59]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[59]*/ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[59]*/ u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[59]*/ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[59]*/ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[59]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /*[59]*/ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /*[59]*/ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2]; /*[59]*/ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /*[59]*/ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /*[59]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /*[59]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[59]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /*[59]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[59]*/ /*[59]*/ /*[59]*/ 
/*[59]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[59]*/ /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]); /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 
1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]); } /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[59]*/ /*[59]*/ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /*[59]*/ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /*[59]*/ jst1 = 3; /*[59]*/ jend1 = ny - 4; /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[59]*/ /*[59]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /*[59]*/ /*[59]*/ /*[59]*/ /*[59]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[59]*/ /*[59]*/ 
rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /*[59]*/ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /*[59]*/ // #pragma omp dummyFlush BARRIER_START /*[59]*/ #pragma omp barrier /*[60]*/ #pragma omp for nowait /*[60]*/ /*[60]*/ /*[60]*/ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[60]*/ /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[60]*/ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[60]*/ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /*[60]*/ 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[60]*/ /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /*[60]*/ /*[60]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /*[60]*/ u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /*[60]*/ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /*[60]*/ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /*[60]*/ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /*[60]*/ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /*[60]*/ u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /*[60]*/ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /*[60]*/ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /*[60]*/ u51km1 = tmp_imopVarPre88 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[60]*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /*[60]*/ /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[60]*/ /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { 
/*[60]*/ /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /*[60]*/ /*[60]*/ /*[60]*/ /*[60]*/ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /*[60]*/ /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /*[60]*/ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } } /*[61]*/ #pragma omp parallel { /*[61]*/ /*[61]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[61]*/ double *sum; /*[61]*/ v = rsd; /*[61]*/ sum = rsdnm; /*[61]*/ int i_imopVarPre75; /*[61]*/ int j_imopVarPre76; /*[61]*/ int k_imopVarPre77; /*[61]*/ int m_imopVarPre78; /*[61]*/ double sum0 = 0.0; /*[61]*/ double sum1 = 0.0; /*[61]*/ double sum2 = 0.0; /*[61]*/ double sum3 = 0.0; /*[61]*/ double sum4 = 0.0; /*[61]*/ #pragma omp single nowait { /*[61]*/ /*[61]*/ /*[61]*/ /*[61]*/ /*[61]*/ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /*[61]*/ /*[61]*/ sum[m_imopVarPre78] = 
0.0; } } /*[61]*/ // #pragma omp dummyFlush BARRIER_START /*[61]*/ #pragma omp barrier /*[62]*/ #pragma omp for nowait /*[62]*/ /*[62]*/ /*[62]*/ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /*[62]*/ /*[62]*/ /*[62]*/ /*[62]*/ /*[62]*/ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /*[62]*/ /*[62]*/ /*[62]*/ /*[62]*/ /*[62]*/ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /*[62]*/ /*[62]*/ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /*[62]*/ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /*[62]*/ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /*[62]*/ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /*[62]*/ sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /*[62]*/ // #pragma omp dummyFlush CRITICAL_START /*[62]*/ #pragma omp critical { /*[62]*/ /*[62]*/ sum[0] += sum0; /*[62]*/ sum[1] += sum1; /*[62]*/ sum[2] += sum2; /*[62]*/ sum[3] += sum3; /*[62]*/ sum[4] += sum4; } /*[62]*/ // #pragma omp dummyFlush CRITICAL_END /*[62]*/ // #pragma omp dummyFlush BARRIER_START /*[62]*/ #pragma omp barrier /*[63]*/ #pragma omp single nowait { /*[63]*/ /*[63]*/ /*[63]*/ /*[63]*/ /*[63]*/ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /*[63]*/ /*[63]*/ double _imopVarPre154; /*[63]*/ double _imopVarPre155; /*[63]*/ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[63]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[63]*/ /*[63]*/ sum[m_imopVarPre78] = _imopVarPre155; } } } /*[]*/ timer_clear(1); /*[]*/ /*[]*/ timer_start(1); /*[]*/ /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (istep = 1; istep <= 
itmax; istep++) { /*[]*/ /*[]*/ int _imopVarPre372; /*[]*/ int _imopVarPre370; /*[]*/ int _imopVarPre371; /*[]*/ _imopVarPre370 = istep % 20 == 0; /*[]*/ /*[]*/ if (!_imopVarPre370) { /*[]*/ /*[]*/ _imopVarPre371 = istep == itmax; /*[]*/ /*[]*/ if (!_imopVarPre371) { /*[]*/ /*[]*/ _imopVarPre371 = istep == 1; } /*[]*/ _imopVarPre370 = _imopVarPre371; } /*[]*/ /*[]*/ if (_imopVarPre370) { /*[]*/ /*[]*/ #pragma omp master { /*[]*/ /*[]*/ printf(" Time step %4d\n", istep); /*[]*/ } } /*[64]*/ #pragma omp parallel private(istep, i, j, k, m) { /*[64]*/ /*[64]*/ int _imopVarPre377; /*[64]*/ int _imopVarPre378; /*[64]*/ int _imopVarPre379; /*[64]*/ int _imopVarPre380; /*[64]*/ #pragma omp for nowait /*[64]*/ /*[64]*/ /*[64]*/ for (i = ist; i <= iend; i++) { /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ for (j = jst; j <= jend; j++) { /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ for (k = 1; k <= nz - 2; k++) { /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ /*[64]*/ for (m = 0; m < 5; m++) { /*[64]*/ /*[64]*/ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /*[64]*/ // #pragma omp dummyFlush BARRIER_START /*[64]*/ #pragma omp barrier /*[41]*/ /*[41]*/ /*[41]*/ /*[41]*/ for (k = 1; k <= nz - 2; k++) { /*[41]*/ /*[41]*/ jacld(k); /*[41]*/ /*[41]*/ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /*[41]*/ } /*[41]*/ // #pragma omp dummyFlush BARRIER_START /*[41]*/ #pragma omp barrier /*[42]*/ /*[42]*/ /*[42]*/ /*[42]*/ for (k = nz - 2; k >= 1; k--) { /*[42]*/ /*[42]*/ jacu(k); /*[42]*/ /*[42]*/ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /*[42]*/ } /*[42]*/ // #pragma omp dummyFlush BARRIER_START /*[42]*/ #pragma omp barrier /*[65]*/ #pragma omp for nowait /*[65]*/ /*[65]*/ /*[65]*/ for (i = ist; i <= iend; i++) { /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ for (j = jst; j <= jend; j++) { /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ for (k = 1; k <= nz - 2; k++) { /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ for (m = 0; m < 
5; m++) { /*[65]*/ /*[65]*/ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /*[65]*/ /*[65]*/ if (istep % inorm == 0) { /*[65]*/ /*[65]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[65]*/ double *sum; /*[65]*/ v = rsd; /*[65]*/ sum = delunm; /*[65]*/ int i_imopVarPre89; /*[65]*/ int j_imopVarPre90; /*[65]*/ int k_imopVarPre91; /*[65]*/ int m_imopVarPre92; /*[65]*/ double sum0 = 0.0; /*[65]*/ double sum1 = 0.0; /*[65]*/ double sum2 = 0.0; /*[65]*/ double sum3 = 0.0; /*[65]*/ double sum4 = 0.0; /*[65]*/ #pragma omp single nowait { /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ /*[65]*/ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /*[65]*/ /*[65]*/ sum[m_imopVarPre92] = 0.0; } } /*[65]*/ // #pragma omp dummyFlush BARRIER_START /*[65]*/ #pragma omp barrier /*[66]*/ #pragma omp for nowait /*[66]*/ /*[66]*/ /*[66]*/ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /*[66]*/ /*[66]*/ /*[66]*/ /*[66]*/ /*[66]*/ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /*[66]*/ /*[66]*/ /*[66]*/ /*[66]*/ /*[66]*/ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /*[66]*/ /*[66]*/ sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /*[66]*/ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /*[66]*/ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /*[66]*/ sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /*[66]*/ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /*[66]*/ // #pragma omp dummyFlush CRITICAL_START /*[66]*/ #pragma omp critical { /*[66]*/ /*[66]*/ sum[0] += sum0; /*[66]*/ sum[1] += sum1; /*[66]*/ sum[2] += sum2; 
/*[66]*/ sum[3] += sum3; /*[66]*/ sum[4] += sum4; } /*[66]*/ // #pragma omp dummyFlush CRITICAL_END /*[66]*/ // #pragma omp dummyFlush BARRIER_START /*[66]*/ #pragma omp barrier /*[67]*/ #pragma omp single nowait { /*[67]*/ /*[67]*/ /*[67]*/ /*[67]*/ /*[67]*/ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /*[67]*/ /*[67]*/ double _imopVarPre154; /*[67]*/ double _imopVarPre155; /*[67]*/ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[67]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[67]*/ /*[67]*/ sum[m_imopVarPre92] = _imopVarPre155; } } /*[67]*/ // #pragma omp dummyFlush BARRIER_START /*[67]*/ #pragma omp barrier /*[68]*/ // #pragma omp dummyFlush BARRIER_START /*[68]*/ #pragma omp barrier } /*[65, 69]*/ // #pragma omp dummyFlush BARRIER_START /*[65, 69]*/ #pragma omp barrier /*[66, 70]*/ int i_imopVarPre79; /*[66, 70]*/ int j_imopVarPre80; /*[66, 70]*/ int k_imopVarPre81; /*[66, 70]*/ int m_imopVarPre82; /*[66, 70]*/ int L1; /*[66, 70]*/ int L2; /*[66, 70]*/ int ist1; /*[66, 70]*/ int iend1; /*[66, 70]*/ int jst1; /*[66, 70]*/ int jend1; /*[66, 70]*/ double q; /*[66, 70]*/ double u21; /*[66, 70]*/ double u31; /*[66, 70]*/ double u41; /*[66, 70]*/ double tmp_imopVarPre83; /*[66, 70]*/ double u21i; /*[66, 70]*/ double u31i; /*[66, 70]*/ double u41i; /*[66, 70]*/ double u51i; /*[66, 70]*/ double u21j; /*[66, 70]*/ double u31j; /*[66, 70]*/ double u41j; /*[66, 70]*/ double u51j; /*[66, 70]*/ double u21k; /*[66, 70]*/ double u31k; /*[66, 70]*/ double u41k; /*[66, 70]*/ double u51k; /*[66, 70]*/ double u21im1; /*[66, 70]*/ double u31im1; /*[66, 70]*/ double u41im1; /*[66, 70]*/ double u51im1; /*[66, 70]*/ double u21jm1; /*[66, 70]*/ double u31jm1; /*[66, 70]*/ double u41jm1; /*[66, 70]*/ double u51jm1; /*[66, 70]*/ double u21km1; /*[66, 70]*/ double u31km1; /*[66, 70]*/ double u41km1; /*[66, 70]*/ double u51km1; /*[66, 70]*/ #pragma omp for nowait /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ for (i_imopVarPre79 = 0; 
i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ /*[66, 70]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[66, 70]*/ /*[66, 70]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /*[66, 70]*/ // #pragma omp dummyFlush BARRIER_START /*[66, 70]*/ #pragma omp barrier /*[67, 71]*/ L1 = 0; /*[67, 71]*/ L2 = nx - 1; /*[67, 71]*/ #pragma omp for nowait /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ /*[67, 71]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[67, 71]*/ /*[67, 71]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[67, 71]*/ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[67, 71]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[67, 71]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[67, 71]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /*[67, 71]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /*[67, 71]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /*[67, 71]*/ // #pragma omp dummyFlush BARRIER_START /*[67, 71]*/ #pragma omp barrier /*[68, 72]*/ #pragma omp for nowait /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[68, 72]*/ /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /*[68, 72]*/ L2 = nx - 1; /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /*[68, 72]*/ /*[68, 72]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[68, 72]*/ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[68, 72]*/ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[68, 72]*/ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[68, 72]*/ u51i = 
tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[68, 72]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /*[68, 72]*/ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1]; /*[68, 72]*/ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; /*[68, 72]*/ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3]; /*[68, 72]*/ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /*[68, 72]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /*[68, 72]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /*[68, 72]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /*[68, 72]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[68, 72]*/ /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 
2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]); /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[68, 72]*/ /*[68, 72]*/ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /*[68, 72]*/ 
rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /*[68, 72]*/ ist1 = 3; /*[68, 72]*/ iend1 = nx - 4; /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[68, 72]*/ /*[68, 72]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ /*[68, 72]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[68, 72]*/ /*[68, 72]*/ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /*[68, 72]*/ rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 
3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /*[68, 72]*/ // #pragma omp dummyFlush BARRIER_START /*[68, 72]*/ #pragma omp barrier /*[69, 73]*/ L1 = 0; /*[69, 73]*/ L2 = ny - 1; /*[69, 73]*/ #pragma omp for nowait /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ /*[69, 73]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[69, 73]*/ /*[69, 73]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[69, 73]*/ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[69, 73]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[69, 73]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /*[69, 73]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[69, 73]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /*[69, 73]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /*[69, 73]*/ // 
#pragma omp dummyFlush BARRIER_START /*[69, 73]*/ #pragma omp barrier /*[70, 74]*/ #pragma omp for nowait /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[70, 74]*/ /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /*[70, 74]*/ L2 = ny - 1; /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /*[70, 74]*/ /*[70, 74]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[70, 74]*/ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[70, 74]*/ u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[70, 74]*/ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[70, 74]*/ u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[70, 74]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /*[70, 74]*/ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /*[70, 74]*/ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /*[70, 74]*/ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /*[70, 74]*/ u51jm1 = 
tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /*[70, 74]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /*[70, 74]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /*[70, 74]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /*[70, 74]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[70, 74]*/ /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[70, 74]*/ /*[70, 74]*/ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /*[70, 74]*/ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /*[70, 74]*/ jst1 = 3; /*[70, 74]*/ jend1 = ny - 4; /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (j_imopVarPre80 = 
jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[70, 74]*/ /*[70, 74]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ /*[70, 74]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[70, 74]*/ /*[70, 74]*/ rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /*[70, 74]*/ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /*[70, 74]*/ // #pragma omp dummyFlush BARRIER_START /*[70, 74]*/ #pragma omp barrier /*[71, 75]*/ #pragma omp for nowait /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /*[71, 75]*/ /*[71, 
75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[71, 75]*/ /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[71, 75]*/ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[71, 75]*/ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[71, 75]*/ /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } 
/*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /*[71, 75]*/ /*[71, 75]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /*[71, 75]*/ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /*[71, 75]*/ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /*[71, 75]*/ u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /*[71, 75]*/ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /*[71, 75]*/ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /*[71, 75]*/ u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /*[71, 75]*/ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /*[71, 75]*/ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /*[71, 75]*/ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /*[71, 75]*/ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /*[71, 75]*/ /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]); } /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[71, 75]*/ /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[71, 75]*/ /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ /*[71, 75]*/ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /*[71, 75]*/ /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 
5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /*[71, 75]*/ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /*[71, 75]*/ // #pragma omp dummyFlush BARRIER_START /*[71, 75]*/ #pragma omp barrier /*[72, 76]*/ // #pragma omp dummyFlush BARRIER_START /*[72, 76]*/ #pragma omp barrier /*[73, 77]*/ #pragma omp master { /*[73, 77]*/ /*[73, 77]*/ _imopVarPre372 = (istep % inorm == 0); /*[73, 77]*/ /*[73, 77]*/ if (!_imopVarPre372) { /*[73, 77]*/ /*[73, 77]*/ _imopVarPre372 = (istep == itmax); } } /*[73, 77]*/ // #pragma omp dummyFlush BARRIER_START /*[73, 77]*/ #pragma omp barrier /*[74]*/ /*[74]*/ if (_imopVarPre372) { /*[74]*/ /*[74]*/ double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /*[74]*/ double *sum; /*[74]*/ v = rsd; /*[74]*/ sum = rsdnm; /*[74]*/ int i_imopVarPre93; /*[74]*/ int j_imopVarPre94; /*[74]*/ int k_imopVarPre95; /*[74]*/ int m_imopVarPre96; /*[74]*/ double sum0 = 0.0; /*[74]*/ double sum1 = 0.0; /*[74]*/ double sum2 = 0.0; /*[74]*/ double sum3 = 0.0; /*[74]*/ double sum4 = 0.0; /*[74]*/ #pragma omp single nowait { /*[74]*/ /*[74]*/ /*[74]*/ /*[74]*/ /*[74]*/ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /*[74]*/ /*[74]*/ sum[m_imopVarPre96] = 0.0; } } /*[74]*/ // #pragma omp dummyFlush BARRIER_START /*[74]*/ #pragma omp barrier /*[75]*/ #pragma omp for nowait /*[75]*/ /*[75]*/ /*[75]*/ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /*[75]*/ /*[75]*/ /*[75]*/ /*[75]*/ /*[75]*/ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /*[75]*/ /*[75]*/ 
/*[75]*/ /*[75]*/ /*[75]*/ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /*[75]*/ /*[75]*/ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /*[75]*/ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /*[75]*/ sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /*[75]*/ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /*[75]*/ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /*[75]*/ // #pragma omp dummyFlush CRITICAL_START /*[75]*/ #pragma omp critical { /*[75]*/ /*[75]*/ sum[0] += sum0; /*[75]*/ sum[1] += sum1; /*[75]*/ sum[2] += sum2; /*[75]*/ sum[3] += sum3; /*[75]*/ sum[4] += sum4; } /*[75]*/ // #pragma omp dummyFlush CRITICAL_END /*[75]*/ // #pragma omp dummyFlush BARRIER_START /*[75]*/ #pragma omp barrier /*[76]*/ #pragma omp single nowait { /*[76]*/ /*[76]*/ /*[76]*/ /*[76]*/ /*[76]*/ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /*[76]*/ /*[76]*/ double _imopVarPre154; /*[76]*/ double _imopVarPre155; /*[76]*/ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /*[76]*/ _imopVarPre155 = sqrt(_imopVarPre154); /*[76]*/ /*[76]*/ sum[m_imopVarPre96] = _imopVarPre155; } } } /*[74, 76]*/ // #pragma omp dummyFlush BARRIER_START /*[74, 76]*/ #pragma omp barrier /*[75, 77]*/ #pragma omp master { /*[75, 77]*/ /*[75, 77]*/ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /*[75, 77]*/ /*[75, 77]*/ if (_imopVarPre377) { /*[75, 77]*/ /*[75, 77]*/ _imopVarPre378 = (rsdnm[1] < tolrsd[1]); /*[75, 77]*/ /*[75, 77]*/ if (_imopVarPre378) { /*[75, 77]*/ /*[75, 77]*/ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /*[75, 77]*/ /*[75, 77]*/ if (_imopVarPre379) { 
/*
 * NOTE(review): this span begins inside ssor() -- the enclosing
 * "#pragma omp master" block, the time-step (istep) loop, the parallel
 * region and ssor() itself are all opened before this excerpt.  The
 * machine-generated phase annotations of the form [m, n] have been
 * replaced by explanatory comments; all executable tokens are unchanged.
 */
                        /* Remaining links of the flattened short-circuit
                         * chain computing: rsdnm[m] < tolrsd[m] for all
                         * five residual components. */
                        _imopVarPre380 = (rsdnm[3] < tolrsd[3]);
                        if (_imopVarPre380) {
                            _imopVarPre380 = (rsdnm[4] < tolrsd[4]);
                        }
                        _imopVarPre379 = _imopVarPre380;
                    }
                    _imopVarPre378 = _imopVarPre379;
                }
                _imopVarPre377 = _imopVarPre378;
            }
            /* Every residual norm fell below its tolerance: terminate the
             * run early.  Still inside the omp master block, so only the
             * master thread reaches this exit. */
            if (_imopVarPre377) {
                exit(1);
            }
        }   /* end of omp master block */
    }       /* end of time-step (istep) loop */
}           /* end of parallel region */
timer_stop(1);              /* stop wall-clock timer 1 (started before the region) */
maxtime = timer_read(1);    /* total SSOR iteration time reported by main() */
}           /* end of ssor() */

/*
 * verify -- compare the computed norms against class-specific references.
 *
 * xcr[5]    RMS norms of the residual (compared against xcrref).
 * xce[5]    RMS norms of the solution error (compared against xceref).
 * xci       surface integral value (compared against xciref).
 * class     out: problem class 'S'/'W'/'A'/'B'/'C' recognized from the
 *           global grid sizes (nx0, ny0, nz0) and iteration count (itmax),
 *           or 'U' (unknown) when no class matches.
 * verified  out: 1 when every relative difference is within epsilon and
 *           dt matches the class's reference time step, else 0.
 *
 * The _imopVarPreNNN temporaries are a flattened form of short-circuit
 * conjunctions produced by the instrumentation pass, e.g. the first chain
 * computes (nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50).
 */
static void verify(double xcr[5], double xce[5], double xci, char *class, boolean *verified)
{
    double xcrref[5];   /* reference residual norms for the detected class */
    double xceref[5];   /* reference error norms */
    double xciref;      /* reference surface integral */
    double xcrdif[5];   /* relative differences actually computed */
    double xcedif[5];
    double xcidif;
    double epsilon;     /* acceptance tolerance for relative differences */
    double dtref;       /* reference time step; only read when class != 'U' */
    int m;
    epsilon = 1.0e-08;
    *class = 'U';
    *verified = 1;
    /* Defaults of 1.0 keep the relative-difference divisions below safe
     * even when no class is recognized. */
    for (m = 0; m < 5; m++) {
        xcrref[m] = 1.0;
        xceref[m] = 1.0;
    }
    xciref = 1.0;
    /* Class S: 12x12x12 grid, 50 iterations. */
    int _imopVarPre384;
    int _imopVarPre385;
    int _imopVarPre386;
    _imopVarPre384 = nx0 == 12;
    if (_imopVarPre384) {
        _imopVarPre385 = ny0 == 12;
        if (_imopVarPre385) {
            _imopVarPre386 = nz0 == 12;
            if (_imopVarPre386) {
                _imopVarPre386 = itmax == 50;
            }
            _imopVarPre385 = _imopVarPre386;
        }
        _imopVarPre384 = _imopVarPre385;
    }
    if (_imopVarPre384) {
        *class = 'S';
        dtref = 5.0e-1;
        xcrref[0] = 1.6196343210976702e-02;
        xcrref[1] = 2.1976745164821318e-03;
        xcrref[2] = 1.5179927653399185e-03;
        xcrref[3] = 1.5029584435994323e-03;
        xcrref[4] = 3.4264073155896461e-02;
        xceref[0] = 6.4223319957960924e-04;
        xceref[1] = 8.4144342047347926e-05;
        xceref[2] = 5.8588269616485186e-05;
        xceref[3] = 5.8474222595157350e-05;
        xceref[4] = 1.3103347914111294e-03;
        xciref = 7.8418928865937083;
    } else {
        /* Class W: 33x33x33 grid, 300 iterations. */
        int _imopVarPre390;
        int _imopVarPre391;
        int _imopVarPre392;
        _imopVarPre390 = nx0 == 33;
        if (_imopVarPre390) {
            _imopVarPre391 = ny0 == 33;
            if (_imopVarPre391) {
                _imopVarPre392 = nz0 == 33;
                if (_imopVarPre392) {
                    _imopVarPre392 = itmax == 300;
                }
                _imopVarPre391 = _imopVarPre392;
            }
            _imopVarPre390 = _imopVarPre391;
        }
        if (_imopVarPre390) {
            *class = 'W';
            dtref = 1.5e-3;
            xcrref[0] = 0.1236511638192e+02;
            xcrref[1] = 0.1317228477799e+01;
            xcrref[2] = 0.2550120713095e+01;
            xcrref[3] = 0.2326187750252e+01;
            xcrref[4] = 0.2826799444189e+02;
            xceref[0] = 0.4867877144216;
            xceref[1] = 0.5064652880982e-01;
            xceref[2] = 0.9281818101960e-01;
            xceref[3] = 0.8570126542733e-01;
            xceref[4] = 0.1084277417792e+01;
            xciref = 0.1161399311023e+02;
        } else {
            /* Class A: 64x64x64 grid, 250 iterations. */
            int _imopVarPre396;
            int _imopVarPre397;
            int _imopVarPre398;
            _imopVarPre396 = nx0 == 64;
            if (_imopVarPre396) {
                _imopVarPre397 = ny0 == 64;
                if (_imopVarPre397) {
                    _imopVarPre398 = nz0 == 64;
                    if (_imopVarPre398) {
                        _imopVarPre398 = itmax == 250;
                    }
                    _imopVarPre397 = _imopVarPre398;
                }
                _imopVarPre396 = _imopVarPre397;
            }
            if (_imopVarPre396) {
                *class = 'A';
                dtref = 2.0e+0;
                xcrref[0] = 7.7902107606689367e+02;
                xcrref[1] = 6.3402765259692870e+01;
                xcrref[2] = 1.9499249727292479e+02;
                xcrref[3] = 1.7845301160418537e+02;
                xcrref[4] = 1.8384760349464247e+03;
                xceref[0] = 2.9964085685471943e+01;
                xceref[1] = 2.8194576365003349;
                xceref[2] = 7.3473412698774742;
                xceref[3] = 6.7139225687777051;
                xceref[4] = 7.0715315688392578e+01;
                xciref = 2.6030925604886277e+01;
            } else {
                /* Class B: 102x102x102 grid, 250 iterations. */
                int _imopVarPre402;
                int _imopVarPre403;
                int _imopVarPre404;
                _imopVarPre402 = nx0 == 102;
                if (_imopVarPre402) {
                    _imopVarPre403 = ny0 == 102;
                    if (_imopVarPre403) {
                        _imopVarPre404 = nz0 == 102;
                        if (_imopVarPre404) {
                            _imopVarPre404 = itmax == 250;
                        }
                        _imopVarPre403 = _imopVarPre404;
                    }
                    _imopVarPre402 = _imopVarPre403;
                }
                if (_imopVarPre402) {
                    *class = 'B';
                    dtref = 2.0e+0;
                    xcrref[0] = 3.5532672969982736e+03;
                    xcrref[1] = 2.6214750795310692e+02;
                    xcrref[2] = 8.8333721850952190e+02;
                    xcrref[3] = 7.7812774739425265e+02;
                    xcrref[4] = 7.3087969592545314e+03;
                    xceref[0] = 1.1401176380212709e+02;
                    xceref[1] = 8.1098963655421574;
                    xceref[2] = 2.8480597317698308e+01;
                    xceref[3] = 2.5905394567832939e+01;
                    xceref[4] = 2.6054907504857413e+02;
                    xciref = 4.7887162703308227e+01;
                } else {
                    /* Class C: 162x162x162 grid, 250 iterations. */
                    int _imopVarPre408;
                    int _imopVarPre409;
                    int _imopVarPre410;
                    _imopVarPre408 = nx0 == 162;
                    if (_imopVarPre408) {
                        _imopVarPre409 = ny0 == 162;
                        if (_imopVarPre409) {
                            _imopVarPre410 = nz0 == 162;
                            if (_imopVarPre410) {
                                _imopVarPre410 = itmax == 250;
                            }
                            _imopVarPre409 = _imopVarPre410;
                        }
                        _imopVarPre408 = _imopVarPre409;
                    }
                    if (_imopVarPre408) {
                        *class = 'C';
                        dtref = 2.0e+0;
                        xcrref[0] = 1.03766980323537846e+04;
                        xcrref[1] = 8.92212458801008552e+02;
                        xcrref[2] = 2.56238814582660871e+03;
                        xcrref[3] = 2.19194343857831427e+03;
                        xcrref[4] = 1.78078057261061185e+04;
                        xceref[0] = 2.15986399716949279e+02;
                        xceref[1] = 1.55789559239863600e+01;
                        xceref[2] = 5.41318863077207766e+01;
                        xceref[3] = 4.82262643154045421e+01;
                        xceref[4] = 4.55902910043250358e+02;
                        xciref = 6.66404553572181300e+01;
                    } else {
                        /* No known class matched: report unverified. */
                        *verified = 0;
                    }
                }
            }
        }
    }
    /* Relative differences against the (possibly default 1.0) references. */
    for (m = 0; m < 5; m++) {
        double _imopVarPre412;
        double _imopVarPre413;
        _imopVarPre412 = (xcr[m] - xcrref[m]) / xcrref[m];
        _imopVarPre413 = fabs(_imopVarPre412);
        xcrdif[m] = _imopVarPre413;
        double _imopVarPre415;
        double _imopVarPre416;
        _imopVarPre415 = (xce[m] - xceref[m]) / xceref[m];
        _imopVarPre416 = fabs(_imopVarPre415);
        xcedif[m] = _imopVarPre416;
    }
    double _imopVarPre418;
    double _imopVarPre419;
    _imopVarPre418 = (xci - xciref) / xciref;
    _imopVarPre419 = fabs(_imopVarPre418);
    xcidif = _imopVarPre419;
    /* A recognized class must also have been run with the reference dt;
     * otherwise the run is demoted back to unverified/'U'. */
    if (*class != 'U') {
        char _imopVarPre421;
        _imopVarPre421 = *class;
        printf("\n Verification being performed for class %1c\n", _imopVarPre421);
        printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
        double _imopVarPre424;
        double _imopVarPre425;
        _imopVarPre424 = dt - dtref;
        _imopVarPre425 = fabs(_imopVarPre424);
        if (_imopVarPre425 > epsilon) {
            *verified = 0;
            *class = 'U';
            printf(" DT does not match the reference value of %15.8e\n", dtref);
        }
    } else {
        printf(" Unknown class\n");
    }
    /* Residual norms: compare (known class) or just print (unknown). */
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of residual\n");
    } else {
        printf(" RMS-norms of residual\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            double _imopVarPre427;
            _imopVarPre427 = xcr[m];
            printf(" %2d %20.13e\n", m, _imopVarPre427);
        } else {
            if (xcrdif[m] > epsilon) {
                *verified = 0;
                double _imopVarPre431;
                double _imopVarPre432;
                double _imopVarPre433;
                _imopVarPre431 = xcrdif[m];
                _imopVarPre432 = xcrref[m];
                _imopVarPre433 = xcr[m];
                printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre433, _imopVarPre432, _imopVarPre431);
            } else {
                double _imopVarPre437;
                double _imopVarPre438;
                double _imopVarPre439;
                _imopVarPre437 = xcrdif[m];
                _imopVarPre438 = xcrref[m];
                _imopVarPre439 = xcr[m];
                printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre439, _imopVarPre438, _imopVarPre437);
            }
        }
    }
    /* Solution-error norms, same pattern as the residual norms above. */
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of solution error\n");
    } else {
        printf(" RMS-norms of solution error\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            double _imopVarPre441;
            _imopVarPre441 = xce[m];
            printf(" %2d %20.13e\n", m, _imopVarPre441);
        } else {
            if (xcedif[m] > epsilon) {
                *verified = 0;
                double _imopVarPre445;
                double _imopVarPre446;
                double _imopVarPre447;
                _imopVarPre445 = xcedif[m];
                _imopVarPre446 = xceref[m];
                _imopVarPre447 = xce[m];
                printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre447, _imopVarPre446, _imopVarPre445);
            } else {
                double _imopVarPre451;
                double _imopVarPre452;
                double _imopVarPre453;
                _imopVarPre451 = xcedif[m];
                _imopVarPre452 = xceref[m];
                _imopVarPre453 = xce[m];
                printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre453, _imopVarPre452, _imopVarPre451);
            }
        }
    }
    /* Surface integral: a single scalar comparison. */
    if (*class != 'U') {
        printf(" Comparison of surface integral\n");
    } else {
        printf(" Surface integral\n");
    }
    if (*class == 'U') {
        printf(" %20.13e\n", xci);
    } else {
        if (xcidif > epsilon) {
            *verified = 0;
            printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
        } else {
            printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
        }
    }
    /* Final verdict. */
    if (*class == 'U') {
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    } else {
        if (*verified) {
            printf(" Verification Successful\n");
        } else {
            printf(" Verification failed\n");
        }
    }
}
/* [] */ typedef long long __int64_t; /* [] */ typedef __int64_t __darwin_off_t; /* [] */ typedef __darwin_off_t fpos_t; /* [] */ struct __sbuf { unsigned char *_base; int _size; }; /* [] */ struct __sFILEX; /* [] */ struct __sFILE { unsigned char *_p; int _r; int _w; short _flags; short _file; struct __sbuf _bf; int _lbfsize; void *_cookie; int (*_close) (void *); int (*_read) (void *, char *, int); fpos_t(*_seek) (void *, fpos_t, int); int (*_write) (void *, const char *, int); struct __sbuf _ub; struct __sFILEX *_extra; int _ur; unsigned char _ubuf[3]; unsigned char _nbuf[1]; struct __sbuf _lb; int _blksize; fpos_t _offset; }; /* [] */ typedef struct __sFILE FILE; /* [] */ int fclose(FILE *); /* [] */ int fgetc(FILE *); /* [] */ FILE *fopen(const char *restrict __filename, const char *restrict __mode); /* [] */ int fscanf(FILE * restrict, const char *restrict,...); /* [] */ int printf(const char *restrict,...); /* [] */ void exit(int); /* [] */ extern double fabs(double); /* [] */ extern double sqrt(double); /* [] */ extern int omp_get_num_threads(void); /* [] */ typedef int boolean; /* [] */ extern void timer_clear(int); /* [] */ extern void timer_start(int); /* [] */ extern void timer_stop(int); /* [] */ extern double timer_read(int); /* [] */ extern void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* [] */ static int nx; /* [] */ static int ny; /* [] */ static int nz; /* [] */ static int nx0; /* [] */ static int ny0; /* [] */ static int nz0; /* [] */ static int ist; /* [] */ static int iend; /* [] */ static int jst; /* [] */ static int jend; /* [] */ static int ii1; /* [] */ static int ii2; /* [] */ static int ji1; /* [] */ static int ji2; /* [] */ static int ki1; /* [] */ static int ki2; /* [] */ static double dxi; /* [] */ 
static double deta; /* [] */ static double dzeta; /* [] */ static double tx1; /* [] */ static double tx2; /* [] */ static double tx3; /* [] */ static double ty1; /* [] */ static double ty2; /* [] */ static double ty3; /* [] */ static double tz1; /* [] */ static double tz2; /* [] */ static double tz3; /* [] */ static double dx1; /* [] */ static double dx2; /* [] */ static double dx3; /* [] */ static double dx4; /* [] */ static double dx5; /* [] */ static double dy1; /* [] */ static double dy2; /* [] */ static double dy3; /* [] */ static double dy4; /* [] */ static double dy5; /* [] */ static double dz1; /* [] */ static double dz2; /* [] */ static double dz3; /* [] */ static double dz4; /* [] */ static double dz5; /* [] */ static double dssp; /* [] */ static double u[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [] */ static double rsd[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [] */ static double frct[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [] */ static double flux[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [] */ static int ipr; /* [] */ static int inorm; /* [] */ static int itmax; /* [] */ static double dt; /* [] */ static double omega; /* [] */ static double tolrsd[5]; /* [] */ static double rsdnm[5]; /* [] */ static double errnm[5]; /* [] */ static double frc; /* [] */ static double a[12][12][5][5]; /* [] */ static double b[12][12][5][5]; /* [] */ static double c[12][12][5][5]; /* [] */ static double d[12][12][5][5]; /* [] */ static double ce[5][13]; /* [] */ static double maxtime; /* [] */ static boolean flag[12 / 2 * 2 + 1]; /* [] */ static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0); /* [] */ static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5], 
double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0); /* [] */ static void domain(void); /* [] */ static void erhs(void); /* [] */ static void error(void); /* [] */ static void exact(int i, int j, int k, double u000ijk[5]); /* [] */ static void jacld(int k); /* [] */ static void jacu(int k); /* [] */ static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double sum[5]); /* [] */ static void pintgr(void); /* [] */ static void read_input(void); /* [] */ static void rhs(void); /* [] */ static void setbv(void); /* [] */ static void setcoeff(void); /* [] */ static void setiv(void); /* [] */ static void ssor(void); /* [] */ static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified); /* [] */ /* [] */ /* [] */ int main(int argc, char **argv) { /* [] */ /* [] */ char class; /* [] */ boolean verified; /* [] */ double mflops; /* [] */ int nthreads = 1; /* [] */ read_input(); /* [] */ /* [] */ domain(); /* [] */ /* [] */ setcoeff(); /* [] */ /* [1] */ /* [1] */ /* [1] */ int i; /* [1] */ int j; /* [1] */ int k; /* [1] */ int iglob; /* [1] */ int jglob; /* [1] */ /* [1] */ /* [1] */ /* [1] */ for (i = 0; i < nx; i++) { /* [1] */ /* [1] */ iglob = i; /* [1] */ /* [1] */ /* [1] */ /* [1] */ for (j = 0; j < ny; j++) { /* [1] */ /* [1] */ jglob = j; /* [1] */ double *_imopVarPre239; /* [1] */ _imopVarPre239 = &u[i][j][0][0]; /* [1] */ exact(iglob, jglob, 0, _imopVarPre239); /* [1] */ /* [1] */ double *_imopVarPre242; /* [1] */ int _imopVarPre243; /* [1] */ _imopVarPre242 = &u[i][j][nz - 1][0]; /* [1] */ _imopVarPre243 = nz - 1; /* [1] */ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /* [1] */ } } /* [1] */ // /* [1] */ /* [2] */ /* [2] */ /* [2] */ /* [2] */ for (i = 0; i < nx; i++) { /* [2] */ /* [2] */ iglob = i; /* [2] */ /* [2] */ /* [2] */ /* [2] */ for (k = 0; k < nz; k++) { /* 
[2] */ /* [2] */ double *_imopVarPre245; /* [2] */ _imopVarPre245 = &u[i][0][k][0]; /* [2] */ exact(iglob, 0, k, _imopVarPre245); /* [2] */ } } /* [2] */ // /* [2] */ /* [3] */ /* [3] */ /* [3] */ /* [3] */ for (i = 0; i < nx; i++) { /* [3] */ /* [3] */ iglob = i; /* [3] */ /* [3] */ /* [3] */ /* [3] */ for (k = 0; k < nz; k++) { /* [3] */ /* [3] */ double *_imopVarPre248; /* [3] */ int _imopVarPre249; /* [3] */ _imopVarPre248 = &u[i][ny - 1][k][0]; /* [3] */ _imopVarPre249 = ny0 - 1; /* [3] */ exact(iglob, _imopVarPre249, k, _imopVarPre248); /* [3] */ } } /* [3] */ // /* [3] */ /* [4] */ /* [4] */ /* [4] */ /* [4] */ for (j = 0; j < ny; j++) { /* [4] */ /* [4] */ jglob = j; /* [4] */ /* [4] */ /* [4] */ /* [4] */ for (k = 0; k < nz; k++) { /* [4] */ /* [4] */ double *_imopVarPre251; /* [4] */ _imopVarPre251 = &u[0][j][k][0]; /* [4] */ exact(0, jglob, k, _imopVarPre251); /* [4] */ } } /* [4] */ // /* [4] */ /* [5] */ /* [5] */ /* [5] */ /* [5] */ for (j = 0; j < ny; j++) { /* [5] */ /* [5] */ jglob = j; /* [5] */ /* [5] */ /* [5] */ /* [5] */ for (k = 0; k < nz; k++) { /* [5] */ /* [5] */ double *_imopVarPre254; /* [5] */ int _imopVarPre255; /* [5] */ _imopVarPre254 = &u[nx - 1][j][k][0]; /* [5] */ _imopVarPre255 = nx0 - 1; /* [5] */ exact(_imopVarPre255, jglob, k, _imopVarPre254); /* [5] */ } } /* [6] */ /* [6] */ /* [6] */ int i; /* [6] */ int j; /* [6] */ int k; /* [6] */ int m; /* [6] */ int iglob; /* [6] */ int jglob; /* [6] */ double xi; /* [6] */ double eta; /* [6] */ double zeta; /* [6] */ double pxi; /* [6] */ double peta; /* [6] */ double pzeta; /* [6] */ double ue_1jk[5]; /* [6] */ double ue_nx0jk[5]; /* [6] */ double ue_i1k[5]; /* [6] */ double ue_iny0k[5]; /* [6] */ double ue_ij1[5]; /* [6] */ double ue_ijnz[5]; /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (j = 0; j < ny; j++) { /* [6] */ /* [6] */ jglob = j; /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (k = 1; k < nz - 1; k++) { /* [6] */ /* [6] */ zeta = ((double)k) / (nz - 1); /* [6] */ int 
_imopVarPre361; /* [6] */ _imopVarPre361 = jglob != 0; /* [6] */ /* [6] */ if (_imopVarPre361) { /* [6] */ /* [6] */ _imopVarPre361 = jglob != ny0 - 1; } /* [6] */ /* [6] */ if (_imopVarPre361) { /* [6] */ /* [6] */ eta = ((double)jglob) / (ny0 - 1); /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (i = 0; i < nx; i++) { /* [6] */ /* [6] */ iglob = i; /* [6] */ int _imopVarPre363; /* [6] */ _imopVarPre363 = iglob != 0; /* [6] */ /* [6] */ if (_imopVarPre363) { /* [6] */ /* [6] */ _imopVarPre363 = iglob != nx0 - 1; } /* [6] */ /* [6] */ if (_imopVarPre363) { /* [6] */ /* [6] */ xi = ((double)iglob) / (nx0 - 1); /* [6] */ exact(0, jglob, k, ue_1jk); /* [6] */ /* [6] */ int _imopVarPre365; /* [6] */ _imopVarPre365 = nx0 - 1; /* [6] */ exact(_imopVarPre365, jglob, k, ue_nx0jk); /* [6] */ /* [6] */ exact(iglob, 0, k, ue_i1k); /* [6] */ /* [6] */ int _imopVarPre367; /* [6] */ _imopVarPre367 = ny0 - 1; /* [6] */ exact(iglob, _imopVarPre367, k, ue_iny0k); /* [6] */ /* [6] */ exact(iglob, jglob, 0, ue_ij1); /* [6] */ /* [6] */ int _imopVarPre369; /* [6] */ _imopVarPre369 = nz - 1; /* [6] */ exact(iglob, jglob, _imopVarPre369, ue_ijnz); /* [6] */ /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (m = 0; m < 5; m++) { /* [6] */ /* [6] */ pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; /* [6] */ peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; /* [6] */ pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; /* [6] */ u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ int i; /* [6, 7] */ int j; /* [6, 7] */ int k; /* [6, 7] */ int m; /* [6, 7] */ int iglob; /* [6, 7] */ int jglob; /* [6, 7] */ int L1; /* [6, 7] */ int L2; /* [6, 7] */ int ist1; /* [6, 7] */ int iend1; /* [6, 7] */ int jst1; /* [6, 7] */ int jend1; /* [6, 7] */ double dsspm; /* [6, 7] */ double xi; /* [6, 7] */ double eta; /* [6, 7] */ double zeta; /* [6, 7] */ double q; /* [6, 7] */ double u21; /* [6, 7] */ double u31; /* 
[6, 7] */ double u41; /* [6, 7] */ double tmp; /* [6, 7] */ double u21i; /* [6, 7] */ double u31i; /* [6, 7] */ double u41i; /* [6, 7] */ double u51i; /* [6, 7] */ double u21j; /* [6, 7] */ double u31j; /* [6, 7] */ double u41j; /* [6, 7] */ double u51j; /* [6, 7] */ double u21k; /* [6, 7] */ double u31k; /* [6, 7] */ double u41k; /* [6, 7] */ double u51k; /* [6, 7] */ double u21im1; /* [6, 7] */ double u31im1; /* [6, 7] */ double u41im1; /* [6, 7] */ double u51im1; /* [6, 7] */ double u21jm1; /* [6, 7] */ double u31jm1; /* [6, 7] */ double u41jm1; /* [6, 7] */ double u51jm1; /* [6, 7] */ double u21km1; /* [6, 7] */ double u31km1; /* [6, 7] */ double u41km1; /* [6, 7] */ double u51km1; /* [6, 7] */ dsspm = dssp; /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (i = 0; i < nx; i++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (j = 0; j < ny; j++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (k = 0; k < nz; k++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (m = 0; m < 5; m++) { /* [6, 7] */ /* [6, 7] */ frct[i][j][k][m] = 0.0; } } } } /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (i = 0; i < nx; i++) { /* [6, 7] */ /* [6, 7] */ iglob = i; /* [6, 7] */ xi = ((double)iglob) / (nx0 - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (j = 0; j < ny; j++) { /* [6, 7] */ /* [6, 7] */ jglob = j; /* [6, 7] */ eta = ((double)jglob) / (ny0 - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (k = 0; k < nz; k++) { /* [6, 7] */ /* [6, 7] */ zeta = ((double)k) / (nz - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (m = 0; m < 5; m++) { /* [6, 7] */ /* [6, 7] */ rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * 
eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /* [6, 7] */ // /* [6, 7] */ /* [6, 8] */ L1 = 0; /* [6, 8] */ L2 = nx - 1; /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (i = L1; i <= L2; i++) { /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (j = jst; j <= jend; j++) { /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (k = 1; k < nz - 1; k++) { /* [6, 8] */ /* [6, 8] */ flux[i][j][k][0] = rsd[i][j][k][1]; /* [6, 8] */ u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; /* [6, 8] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 8] */ flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 8] */ flux[i][j][k][2] = rsd[i][j][k][2] * u21; /* [6, 8] */ flux[i][j][k][3] = rsd[i][j][k][3] * u21; /* [6, 8] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [6, 8] */ // /* [6, 8] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (j = jst; j <= jend; j++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (k = 1; k <= nz - 2; k++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= iend; i++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= L2; i++) { /* [6, 9] */ /* [6, 9] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 9] */ u21i = tmp * rsd[i][j][k][1]; /* [6, 9] */ u31i = tmp * rsd[i][j][k][2]; /* [6, 9] */ u41i = tmp * rsd[i][j][k][3]; /* [6, 9] */ u51i = tmp * rsd[i][j][k][4]; /* [6, 9] */ tmp = 1.0 / rsd[i - 1][j][k][0]; /* [6, 9] */ u21im1 = tmp * rsd[i - 1][j][k][1]; /* [6, 9] */ u31im1 = tmp * rsd[i - 1][j][k][2]; /* [6, 9] */ u41im1 = tmp * rsd[i - 1][j][k][3]; /* [6, 9] */ u51im1 
= tmp * rsd[i - 1][j][k][4]; /* [6, 9] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [6, 9] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [6, 9] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [6, 9] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= iend; i++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); /* [6, 9] */ frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); /* [6, 9] */ frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); /* [6, 9] */ frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); /* [6, 9] */ frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); /* [6, 9] */ frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } /* [6, 9] */ ist1 = 3; /* [6, 9] */ iend1 = nx - 4; /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* 
[6, 9] */ for (i = ist1; i <= iend1; i++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); /* [6, 9] */ frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /* [6, 9] */ // /* [6, 9] */ /* [6, 10] */ L1 = 0; /* [6, 10] */ L2 = ny - 1; /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (i = ist; i <= iend; i++) { /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (j = L1; j <= L2; j++) { /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (k = 1; k <= nz - 2; k++) { /* [6, 10] */ /* [6, 10] */ flux[i][j][k][0] = rsd[i][j][k][2]; /* [6, 10] */ u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; /* [6, 10] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 10] */ flux[i][j][k][1] = rsd[i][j][k][1] * u31; /* [6, 10] */ flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 10] */ flux[i][j][k][3] = rsd[i][j][k][3] * u31; /* [6, 10] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [6, 10] */ // /* [6, 10] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (i = ist; i <= iend; i++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (k = 1; k <= nz - 2; k++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; 
j <= jend; j++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; j <= L2; j++) { /* [6, 11] */ /* [6, 11] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 11] */ u21j = tmp * rsd[i][j][k][1]; /* [6, 11] */ u31j = tmp * rsd[i][j][k][2]; /* [6, 11] */ u41j = tmp * rsd[i][j][k][3]; /* [6, 11] */ u51j = tmp * rsd[i][j][k][4]; /* [6, 11] */ tmp = 1.0 / rsd[i][j - 1][k][0]; /* [6, 11] */ u21jm1 = tmp * rsd[i][j - 1][k][1]; /* [6, 11] */ u31jm1 = tmp * rsd[i][j - 1][k][2]; /* [6, 11] */ u41jm1 = tmp * rsd[i][j - 1][k][3]; /* [6, 11] */ u51jm1 = tmp * rsd[i][j - 1][k][4]; /* [6, 11] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [6, 11] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [6, 11] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [6, 11] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; j <= jend; j++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); /* [6, 11] */ frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); /* [6, 11] */ frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); /* [6, 11] */ frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j 
+ 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); /* [6, 11] */ frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); /* [6, 11] */ frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } /* [6, 11] */ jst1 = 3; /* [6, 11] */ jend1 = ny - 4; /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst1; j <= jend1; j++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); /* [6, 11] */ frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /* [6, 11] */ // /* [6, 11] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (i = ist; i <= iend; i++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (j = jst; j <= jend; j++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 0; k <= nz - 1; k++) { /* [6, 12] */ /* [6, 12] */ flux[i][j][k][0] = rsd[i][j][k][3]; /* [6, 12] */ u41 = rsd[i][j][k][3] 
/ rsd[i][j][k][0]; /* [6, 12] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 12] */ flux[i][j][k][1] = rsd[i][j][k][1] * u41; /* [6, 12] */ flux[i][j][k][2] = rsd[i][j][k][2] * u41; /* [6, 12] */ flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 12] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 2; k++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 1; k++) { /* [6, 12] */ /* [6, 12] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 12] */ u21k = tmp * rsd[i][j][k][1]; /* [6, 12] */ u31k = tmp * rsd[i][j][k][2]; /* [6, 12] */ u41k = tmp * rsd[i][j][k][3]; /* [6, 12] */ u51k = tmp * rsd[i][j][k][4]; /* [6, 12] */ tmp = 1.0 / rsd[i][j][k - 1][0]; /* [6, 12] */ u21km1 = tmp * rsd[i][j][k - 1][1]; /* [6, 12] */ u31km1 = tmp * rsd[i][j][k - 1][2]; /* [6, 12] */ u41km1 = tmp * rsd[i][j][k - 1][3]; /* [6, 12] */ u51km1 = tmp * rsd[i][j][k - 1][4]; /* [6, 12] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [6, 12] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [6, 12] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [6, 12] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 2; k++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * 
rsd[i][j][k][0] + rsd[i][j][k - 1][0]); /* [6, 12] */ frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); /* [6, 12] */ frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); /* [6, 12] */ frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); /* [6, 12] */ frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); /* [6, 12] */ frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 3; k <= nz - 4; k++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); /* [6, 12] */ frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 
4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } /* [13] */ /* [13] */ /* [13] */ #pragma omp master { /* [13] */ /* [13] */ nthreads = omp_get_num_threads(); /* [13] */ } /* [13] */ int i; /* [13] */ int j; /* [13] */ int k; /* [13] */ int m; /* [13] */ int istep; /* [13] */ double tmp; /* [13] */ double delunm[5]; /* [13] */ double tv[12][12][5]; /* [13] */ tmp = 1.0 / (omega * (2.0 - omega)); /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (i = 0; i < 12; i++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (j = 0; j < 12; j++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (k = 0; k < 5; k++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (m = 0; m < 5; m++) { /* [13, 14] */ /* [13, 14] */ a[i][j][k][m] = 0.0; /* [13, 14] */ b[i][j][k][m] = 0.0; /* [13, 14] */ c[i][j][k][m] = 0.0; /* [13, 14] */ d[i][j][k][m] = 0.0; } } } } /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ int i_imopVarPre84; /* [13, 14, 15] */ int j_imopVarPre85; /* [13, 14, 15] */ int k_imopVarPre86; /* [13, 14, 15] */ int m_imopVarPre87; /* [13, 14, 15] */ int L1; /* [13, 14, 15] */ int L2; /* [13, 14, 15] */ int ist1; /* [13, 14, 15] */ int iend1; /* [13, 14, 15] */ int jst1; /* [13, 14, 15] */ int jend1; /* [13, 14, 15] */ double q; /* [13, 14, 15] */ double u21; /* [13, 14, 15] */ double u31; /* [13, 14, 15] */ double u41; /* [13, 14, 15] */ double tmp_imopVarPre88; /* [13, 14, 15] */ double u21i; /* [13, 14, 15] */ double u31i; /* [13, 14, 15] */ double u41i; /* [13, 14, 15] */ double u51i; /* [13, 14, 15] */ double u21j; /* [13, 14, 15] */ double u31j; /* [13, 14, 15] */ double u41j; /* [13, 14, 15] */ double u51j; /* [13, 14, 15] */ double u21k; /* [13, 14, 15] */ double u31k; /* [13, 14, 15] */ double u41k; /* [13, 14, 15] */ double u51k; /* [13, 14, 15] */ double u21im1; /* [13, 14, 15] */ double u31im1; /* [13, 14, 
15] */ double u41im1; /* [13, 14, 15] */ double u51im1; /* [13, 14, 15] */ double u21jm1; /* [13, 14, 15] */ double u31jm1; /* [13, 14, 15] */ double u41jm1; /* [13, 14, 15] */ double u51jm1; /* [13, 14, 15] */ double u21km1; /* [13, 14, 15] */ double u31km1; /* [13, 14, 15] */ double u41km1; /* [13, 14, 15] */ double u51km1; /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 15] */ /* [13, 14, 15] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /* [13, 14, 15] */ // /* [13, 14, 15] */ /* [13, 14, 16] */ L1 = 0; /* [13, 14, 16] */ L2 = nx - 1; /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 16] */ /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 16] */ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 16] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /* [13, 14, 16] */ // /* [13, 14, 16] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * 
(flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 17] */ L2 = nx - 1; /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 17] */ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 17] */ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 17] */ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 17] */ u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 17] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 17] */ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 17] */ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 17] */ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 17] */ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 
* (u51i - u51im1); } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /* [13, 14, 17] */ 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]); } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 17] */ rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /* [13, 14, 17] */ ist1 = 3; /* [13, 14, 17] */ iend1 = nx - 4; /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 
1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 17] */ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /* [13, 14, 17] */ // /* [13, 14, 17] */ /* [13, 14, 18] */ L1 = 0; /* [13, 14, 18] */ L2 = ny - 1; /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 18] */ /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 18] */ u31 = 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 18] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /* [13, 14, 18] */ // /* [13, 14, 18] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 19] */ L2 = ny - 1; /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 19] */ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 19] */ u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 19] */ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 19] */ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 19] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /* [13, 14, 19] */ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /* [13, 14, 19] */ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2]; /* [13, 14, 19] */ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /* [13, 14, 19] */ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 
6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 
1][k_imopVarPre86][3]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]); } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 19] */ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /* [13, 14, 19] */ jst1 = 3; /* [13, 14, 19] */ jend1 = ny - 4; /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * 
u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 19] */ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /* [13, 14, 19] */ // /* [13, 14, 19] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 20] */ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ tmp_imopVarPre88 = 1.0 / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 20] */ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 20] */ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 20] */ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 20] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /* [13, 14, 20] */ u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /* [13, 14, 20] */ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /* [13, 14, 20] */ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /* [13, 14, 20] */ u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 
* u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ 
rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 21] */ double *sum; /* [13, 14, 21] */ v = rsd; /* [13, 14, 21] */ sum = rsdnm; /* [13, 14, 21] */ int i_imopVarPre75; /* [13, 14, 21] */ int j_imopVarPre76; /* [13, 14, 21] */ int k_imopVarPre77; /* [13, 14, 21] */ int m_imopVarPre78; /* [13, 14, 21] */ double sum0 = 0.0; /* [13, 14, 21] */ double sum1 = 0.0; /* [13, 14, 21] */ double sum2 = 0.0; /* [13, 14, 21] */ double sum3 = 0.0; /* [13, 14, 21] */ double sum4 = 0.0; /* [13, 14, 21] */ #pragma omp single nowait { /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [13, 14, 21] */ /* [13, 14, 21] */ sum[m_imopVarPre78] = 0.0; } } /* [13, 14, 21] */ // /* [13, 14, 21] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 
22] */ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /* [13, 14, 22] */ /* [13, 14, 22] */ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /* [13, 14, 22] */ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /* [13, 14, 22] */ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /* [13, 14, 22] */ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /* [13, 14, 22] */ sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /* [13, 14, 22] */ // /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ sum[0] += sum0; /* [13, 14, 22] */ sum[1] += sum1; /* [13, 14, 22] */ sum[2] += sum2; /* [13, 14, 22] */ sum[3] += sum3; /* [13, 14, 22] */ sum[4] += sum4; /* [13, 14, 22] */ // /* [13, 14, 22] */ // /* [13, 14, 22] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [13, 14, 23] */ /* [13, 14, 23] */ double _imopVarPre154; /* [13, 14, 23] */ double _imopVarPre155; /* [13, 14, 23] */ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 23] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 23] */ /* [13, 14, 23] */ sum[m_imopVarPre78] = _imopVarPre155; } /* [13, 14] */ timer_clear(1); /* [13, 14] */ /* [13, 14] */ timer_start(1); /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (istep = 1; istep <= itmax; istep++) { /* [13, 14] */ /* [13, 14] */ int _imopVarPre372; /* [13, 14] */ int _imopVarPre370; /* [13, 14] */ int _imopVarPre371; /* [13, 14] */ _imopVarPre370 = istep % 20 == 0; /* [13, 
14] */ /* [13, 14] */ if (!_imopVarPre370) { /* [13, 14] */ /* [13, 14] */ _imopVarPre371 = istep == itmax; /* [13, 14] */ /* [13, 14] */ if (!_imopVarPre371) { /* [13, 14] */ /* [13, 14] */ _imopVarPre371 = istep == 1; } /* [13, 14] */ _imopVarPre370 = _imopVarPre371; } /* [13, 14] */ /* [13, 14] */ if (_imopVarPre370) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ printf(" Time step %4d\n", istep); /* [13, 14] */ } /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ int _imopVarPre377; /* [13, 14, 24] */ int _imopVarPre378; /* [13, 14, 24] */ int _imopVarPre379; /* [13, 14, 24] */ int _imopVarPre380; /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (i = ist; i <= iend; i++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (j = jst; j <= jend; j++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (m = 0; m < 5; m++) { /* [13, 14, 24] */ /* [13, 14, 24] */ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /* [13, 14, 24] */ // /* [13, 14, 24] */ /* [13, 14, 25] */ /* [13, 14, 25] */ /* [13, 14, 25] */ /* [13, 14, 25] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 25] */ /* [13, 14, 25] */ jacld(k); /* [13, 14, 25] */ /* [13, 14, 25] */ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /* [13, 14, 25] */ } /* [13, 14, 25] */ // /* [13, 14, 25] */ /* [13, 14, 26] */ /* [13, 14, 26] */ /* [13, 14, 26] */ /* [13, 14, 26] */ for (k = nz - 2; k >= 1; k--) { /* [13, 14, 26] */ /* [13, 14, 26] */ jacu(k); /* [13, 14, 26] */ /* [13, 14, 26] */ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /* [13, 14, 26] */ } /* [13, 14, 26] */ // /* [13, 14, 26] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (i = ist; 
i <= iend; i++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (j = jst; j <= jend; j++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (m = 0; m < 5; m++) { /* [13, 14, 27] */ /* [13, 14, 27] */ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* [13, 14, 27] */ /* [13, 14, 27] */ if (istep % inorm == 0) { /* [13, 14, 27] */ /* [13, 14, 27] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 27] */ double *sum; /* [13, 14, 27] */ v = rsd; /* [13, 14, 27] */ sum = delunm; /* [13, 14, 27] */ int i_imopVarPre89; /* [13, 14, 27] */ int j_imopVarPre90; /* [13, 14, 27] */ int k_imopVarPre91; /* [13, 14, 27] */ int m_imopVarPre92; /* [13, 14, 27] */ double sum0 = 0.0; /* [13, 14, 27] */ double sum1 = 0.0; /* [13, 14, 27] */ double sum2 = 0.0; /* [13, 14, 27] */ double sum3 = 0.0; /* [13, 14, 27] */ double sum4 = 0.0; /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [13, 14, 27] */ /* [13, 14, 27] */ sum[m_imopVarPre92] = 0.0; } /* [13, 14, 27] */ // /* [13, 14, 27] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /* [13, 14, 28] */ /* [13, 14, 28] */ sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * 
v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /* [13, 14, 28] */ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /* [13, 14, 28] */ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /* [13, 14, 28] */ sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /* [13, 14, 28] */ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /* [13, 14, 28] */ // /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ sum[0] += sum0; /* [13, 14, 28] */ sum[1] += sum1; /* [13, 14, 28] */ sum[2] += sum2; /* [13, 14, 28] */ sum[3] += sum3; /* [13, 14, 28] */ sum[4] += sum4; /* [13, 14, 28] */ // /* [13, 14, 28] */ // /* [13, 14, 28] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [13, 14, 29] */ /* [13, 14, 29] */ double _imopVarPre154; /* [13, 14, 29] */ double _imopVarPre155; /* [13, 14, 29] */ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 29] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 29] */ /* [13, 14, 29] */ sum[m_imopVarPre92] = _imopVarPre155; } /* [13, 14, 29] */ // /* [13, 14, 29] */ /* [13, 14, 30] */ // /* [13, 14, 30] */ } /* [13, 14, 27, 31] */ // /* [13, 14, 27, 31] */ /* [13, 14, 28, 32] */ int i_imopVarPre79; /* [13, 14, 28, 32] */ int j_imopVarPre80; /* [13, 14, 28, 32] */ int k_imopVarPre81; /* [13, 14, 28, 32] */ int m_imopVarPre82; /* [13, 14, 28, 32] */ int L1; /* [13, 14, 28, 32] */ int L2; /* [13, 14, 28, 32] */ int ist1; /* [13, 14, 28, 32] */ int iend1; /* [13, 14, 28, 32] */ int jst1; /* [13, 14, 28, 32] */ int jend1; /* [13, 14, 28, 32] */ double q; /* [13, 14, 28, 32] */ 
double u21; /* [13, 14, 28, 32] */ double u31; /* [13, 14, 28, 32] */ double u41; /* [13, 14, 28, 32] */ double tmp_imopVarPre83; /* [13, 14, 28, 32] */ double u21i; /* [13, 14, 28, 32] */ double u31i; /* [13, 14, 28, 32] */ double u41i; /* [13, 14, 28, 32] */ double u51i; /* [13, 14, 28, 32] */ double u21j; /* [13, 14, 28, 32] */ double u31j; /* [13, 14, 28, 32] */ double u41j; /* [13, 14, 28, 32] */ double u51j; /* [13, 14, 28, 32] */ double u21k; /* [13, 14, 28, 32] */ double u31k; /* [13, 14, 28, 32] */ double u41k; /* [13, 14, 28, 32] */ double u51k; /* [13, 14, 28, 32] */ double u21im1; /* [13, 14, 28, 32] */ double u31im1; /* [13, 14, 28, 32] */ double u41im1; /* [13, 14, 28, 32] */ double u51im1; /* [13, 14, 28, 32] */ double u21jm1; /* [13, 14, 28, 32] */ double u31jm1; /* [13, 14, 28, 32] */ double u41jm1; /* [13, 14, 28, 32] */ double u51jm1; /* [13, 14, 28, 32] */ double u21km1; /* [13, 14, 28, 32] */ double u31km1; /* [13, 14, 28, 32] */ double u41km1; /* [13, 14, 28, 32] */ double u51km1; /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /* [13, 14, 28, 32] */ // /* [13, 14, 28, 32] */ /* [13, 14, 29, 33] */ 
L1 = 0; /* [13, 14, 29, 33] */ L2 = nx - 1; /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 29, 33] */ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 29, 33] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /* [13, 14, 29, 33] */ // /* [13, 14, 
29, 33] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 30, 34] */ L2 = nx - 1; /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 30, 34] */ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 30, 34] */ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 30, 34] */ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 30, 34] */ u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 30, 34] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 30, 34] */ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 
1][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 30, 34] */ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 30, 34] */ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 30, 34] */ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 
1][j_imopVarPre80][k_imopVarPre81][1]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 30, 
34] */ rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /* [13, 14, 30, 34] */ ist1 = 3; /* [13, 14, 30, 34] */ iend1 = nx - 4; /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 30, 34] */ rsd[nx - 
2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /* [13, 14, 30, 34] */ // /* [13, 14, 30, 34] */ /* [13, 14, 31, 35] */ L1 = 0; /* [13, 14, 31, 35] */ L2 = ny - 1; /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 31, 35] */ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 31, 35] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /* [13, 14, 31, 35] */ // /* [13, 14, 31, 35] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 32, 36] */ L2 = ny - 1; /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 32, 36] */ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 32, 36] */ u31j = tmp_imopVarPre83 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 32, 36] */ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 32, 36] */ u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 32, 36] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /* [13, 14, 32, 36] */ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /* [13, 14, 32, 36] */ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /* [13, 14, 32, 36] */ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /* [13, 14, 32, 36] */ u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /* [13, 14, 
32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ 
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /* [13, 14, 32, 36] */ jst1 = 3; /* [13, 14, 32, 36] */ jend1 = ny - 4; /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ 
rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /* [13, 14, 32, 36] */ // /* [13, 14, 32, 36] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 33, 37] */ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 33, 37] */ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 33, 37] */ u41k = tmp_imopVarPre83 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 33, 37] */ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 33, 37] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /* [13, 14, 33, 37] */ u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /* [13, 14, 33, 37] */ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /* [13, 14, 33, 37] */ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /* [13, 14, 33, 37] */ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = 
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* 
[13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * 
(u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /* [13, 14, 33, 37] */ // /* [13, 14, 33, 37] */ /* [13, 14, 34, 38] */ // /* [13, 14, 34, 38] */ /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ _imopVarPre372 = (istep % inorm == 0); /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ if (!_imopVarPre372) { /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ _imopVarPre372 = (istep == itmax); } /* [13, 14, 35, 39] */ // /* [13, 14, 35, 39] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ if (_imopVarPre372) { /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 36, 40] */ double *sum; /* [13, 14, 36, 40] */ v = rsd; /* [13, 14, 36, 40] */ sum = rsdnm; /* [13, 14, 36, 40] */ int i_imopVarPre93; /* [13, 14, 36, 40] */ int j_imopVarPre94; /* [13, 14, 36, 40] */ int k_imopVarPre95; /* [13, 14, 36, 40] */ int m_imopVarPre96; /* [13, 14, 36, 40] */ double sum0 = 0.0; /* [13, 14, 36, 40] */ double sum1 = 0.0; /* [13, 14, 36, 40] */ double sum2 = 0.0; /* [13, 14, 36, 40] */ double sum3 = 0.0; /* [13, 14, 36, 40] */ double sum4 = 0.0; /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ sum[m_imopVarPre96] = 0.0; } /* [13, 14, 36, 40] */ // /* [13, 14, 36, 40] */ /* 
[13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /* [13, 14, 37] */ /* [13, 14, 37] */ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /* [13, 14, 37] */ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /* [13, 14, 37] */ sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /* [13, 14, 37] */ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /* [13, 14, 37] */ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /* [13, 14, 37] */ // /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ sum[0] += sum0; /* [13, 14, 37] */ sum[1] += sum1; /* [13, 14, 37] */ sum[2] += sum2; /* [13, 14, 37] */ sum[3] += sum3; /* [13, 14, 37] */ sum[4] += sum4; /* [13, 14, 37] */ // /* [13, 14, 37] */ // /* [13, 14, 37] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [13, 14, 38] */ /* [13, 14, 38] */ double _imopVarPre154; /* [13, 14, 38] */ double _imopVarPre155; /* [13, 14, 38] */ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 38] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 38] */ /* [13, 14, 38] */ 
sum[m_imopVarPre96] = _imopVarPre155; } /* [13, 14, 38] */ // /* [13, 14, 38] */ /* [13, 14, 39] */ // /* [13, 14, 39] */ } /* [13, 14, 36, 40] */ // /* [13, 14, 36, 40] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre377) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre378 = (rsdnm[1] < tolrsd[1]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre378) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre379) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre380 = (rsdnm[3] < tolrsd[3]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre380) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre380 = (rsdnm[4] < tolrsd[4]); } /* [13, 14, 37] */ _imopVarPre379 = _imopVarPre380; } /* [13, 14, 37] */ _imopVarPre378 = _imopVarPre379; } /* [13, 14, 37] */ _imopVarPre377 = _imopVarPre378; } /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre377) { /* [13, 14, 37] */ /* [13, 14, 37] */ exit(1); /* [13, 14, 37] */ } } /* [13, 14] */ timer_stop(1); /* [13, 14] */ /* [13, 14] */ maxtime = timer_read(1); /* [13, 14] */ /* [] */ error(); /* [] */ /* [] */ pintgr(); /* [] */ /* [] */ int *_imopVarPre144; /* [] */ char *_imopVarPre145; /* [] */ _imopVarPre144 = &verified; /* [] */ _imopVarPre145 = &class; /* [] */ verify(rsdnm, errnm, frc, _imopVarPre145, _imopVarPre144); /* [] */ /* [] */ mflops = (double)itmax *(1984.77 * (double)nx0 * (double)ny0 * (double)nz0 - 10923.3 * (((double)(nx0 + ny0 + nz0) / 3.0) * ((double)(nx0 + ny0 + nz0) / 3.0)) + 27770.9 * (double)(nx0 + ny0 + nz0) / 3.0 - 144010.0) / (maxtime * 1000000.0); /* [] */ c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)"); /* [] */ } /* [13, 14, 25, 41] */ 
/*
 * blts: block lower-triangular solve — the forward-substitution half of one
 * SSOR iteration of the NPB LU solver.
 *
 * Solves for v at plane k given the already-updated planes/columns below it:
 *   1. subtract omega * ldz(i,j) * v(i,j,k-1)  (coupling to the k-1 plane);
 *   2. for each (i,j), subtract omega * (ldy * v(i,j-1,k) + ldx * v(i-1,j,k)),
 *      then factor the 5x5 diagonal block d(i,j) in-place (Gaussian
 *      elimination, no pivoting) and back-substitute to recover v(i,j,k).
 *
 * The second i-loop is software-pipelined across threads: each iteration
 * busy-waits on the global flag[] array until its i-1 neighbor has been
 * produced, and publishes its own completion afterwards.
 * NOTE(review): flag[] is declared elsewhere in this file; the spin loops
 * presumably rely on an OpenMP flush/volatile semantics there — confirm.
 *
 * Parameters: nx/ny/nz grid extents (nx, ny, nz, nx0, ny0 unused here but
 * kept for a uniform call signature); k = current plane; omega = SSOR
 * relaxation factor; v = right-hand side, updated in place to the solution;
 * ldz/ldy/ldx/d = 5x5 block Jacobians; ist..iend, jst..jend = interior range.
 */
static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5]; /* local copy of the 5x5 diagonal block, destroyed by elimination */
    /* Step 1: remove the k-1 plane coupling for the whole (i,j) range. */
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);
            }
        }
    }
    /* Step 2: pipelined sweep in increasing i; each i depends on i-1. */
    for (i = ist; i <= iend; i++) {
        if (i != ist) {
            /* wait until the i-1 row has been solved (spin on shared flag[]) */
            while (flag[i - 1] == 0) {
                ;
            }
        }
        if (i != iend) {
            /* wait until the consumer of this row has drained the old value */
            while (flag[i] == 1) {
                ;
            }
        }
        for (j = jst; j <= jend; j++) {
            /* remove the j-1 and i-1 couplings (both already solved) */
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]);
            }
            /* copy the diagonal block so elimination does not clobber d */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination of the 5x5 system tmat * x = v(i,j,k),
             * column by column, updating the right-hand side in step. */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp;
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp;
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp;
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp;
            /* Back substitution: recover components 4 down to 0. */
            v[i][j][k][4] = v[i][j][k][4] / tmat[4][4];
            v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4];
            v[i][j][k][3] = v[i][j][k][3] / tmat[3][3];
            v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4];
            v[i][j][k][2] = v[i][j][k][2] / tmat[2][2];
            v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4];
            v[i][j][k][1] = v[i][j][k][1] / tmat[1][1];
            v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4];
            v[i][j][k][0] = v[i][j][k][0] / tmat[0][0];
        }
        /* Publish pipeline state: consume the i-1 token, produce our own. */
        if (i != ist) {
            flag[i - 1] = 0;
        }
        if (i != iend) {
            flag[i] = 1;
        }
    }
}
/*
*/
/*
 * buts: block upper-triangular solve — the backward-substitution half of one
 * SSOR iteration of the NPB LU solver (mirror image of blts).
 *
 * For plane k, given already-updated planes/columns above it:
 *   1. tv = omega * udz(i,j) * v(i,j,k+1)       (coupling to the k+1 plane);
 *   2. sweeping i and j DOWNWARD, add omega * (udy * v(i,j+1,k) +
 *      udx * v(i+1,j,k)) into tv, factor the 5x5 diagonal block d(i,j)
 *      (Gaussian elimination, no pivoting), back-substitute for tv, and
 *      finally apply the correction v(i,j,k) -= tv(i,j).
 *
 * The second i-loop is software-pipelined in decreasing i: each iteration
 * busy-waits on the global flag[] array until row i+1 is ready, then
 * publishes its own completion.
 * NOTE(review): flag[] is declared elsewhere in this file; the spin loops
 * presumably rely on an OpenMP flush/volatile semantics there — confirm.
 *
 * Parameters mirror blts (nx, ny, nz, nx0, ny0 unused but kept for a
 * uniform signature); tv is scratch storage for the correction.
 */
static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5], double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5]; /* local copy of the 5x5 diagonal block, destroyed by elimination */
    /* Step 1: seed tv with the k+1 plane coupling for the whole range. */
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);
            }
        }
    }
    /* Step 2: pipelined sweep in decreasing i; each i depends on i+1. */
    for (i = iend; i >= ist; i--) {
        if (i != iend) {
            /* wait until the i+1 row has been solved (spin on shared flag[]) */
            while (flag[i + 1] == 0) {
                ;
            }
        }
        if (i != ist) {
            /* wait until the consumer of this row has drained the old value */
            while (flag[i] == 1) {
                ;
            }
        }
        for (j = jend; j >= jst; j--) {
            /* add the j+1 and i+1 couplings (both already solved) */
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]);
            }
            /* copy the diagonal block so elimination does not clobber d */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination of the 5x5 system tmat * x = tv(i,j),
             * column by column, updating the right-hand side in step. */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp;
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp;
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;
            /* Back substitution: recover correction components 4 down to 0. */
            tv[i][j][4] = tv[i][j][4] / tmat[4][4];
            tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
            tv[i][j][3] = tv[i][j][3] / tmat[3][3];
            tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4];
            tv[i][j][2] = tv[i][j][2] / tmat[2][2];
            tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4];
            tv[i][j][1] = tv[i][j][1] / tmat[1][1];
            tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4];
            tv[i][j][0] = tv[i][j][0] / tmat[0][0];
            /* Apply the correction to the solution in place. */
            v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
            v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
            v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
            v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
            v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
        }
        /* Publish pipeline state: consume the i+1 token, produce our own. */
        if (i != iend) {
            flag[i + 1] = 0;
        }
        if (i != ist) {
            flag[i] = 1;
        }
    }
}
/*
 * domain: copy the global problem size (nx0, ny0, nz0) into nx/ny/nz,
 * validate it against the compiled-in limits [4, 12] per dimension
 * (exit(1) with a diagnostic otherwise), and set the interior index
 * bounds ist/iend and jst/jend used by the solver loops.
 */
static void domain() {
    nx = nx0;
    ny = ny0;
    nz = nz0;
    int _imopVarPre146;
    int _imopVarPre147;
    /* reject any dimension smaller than 4 */
    _imopVarPre146 = nx < 4;
    if (!_imopVarPre146) {
        _imopVarPre147 = ny < 4;
        if (!_imopVarPre147) {
            _imopVarPre147 = nz < 4;
        }
        _imopVarPre146 = _imopVarPre147;
    }
    if (_imopVarPre146) {
        printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
        exit(1);
    }
    int _imopVarPre148;
    int _imopVarPre149;
    /* reject any dimension larger than the compiled array extent (12) */
    _imopVarPre148 = nx > 12;
    if (!_imopVarPre148) {
        _imopVarPre149 = ny > 12;
        if (!_imopVarPre149) {
            _imopVarPre149 = nz > 12;
        }
        _imopVarPre148 = _imopVarPre149;
    }
    if (_imopVarPre148) {
        printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. 
THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ ist = 1; /* [] */ iend = nx - 2; /* [] */ jst = 1; /* [] */ jend = ny - 2; } /* [] */ static void erhs() { /* [] */ /* [43] */ /* [43] */ /* [43] */ int i; /* [43] */ int j; /* [43] */ int k; /* [43] */ int m; /* [43] */ int iglob; /* [43] */ int jglob; /* [43] */ int L1; /* [43] */ int L2; /* [43] */ int ist1; /* [43] */ int iend1; /* [43] */ int jst1; /* [43] */ int jend1; /* [43] */ double dsspm; /* [43] */ double xi; /* [43] */ double eta; /* [43] */ double zeta; /* [43] */ double q; /* [43] */ double u21; /* [43] */ double u31; /* [43] */ double u41; /* [43] */ double tmp; /* [43] */ double u21i; /* [43] */ double u31i; /* [43] */ double u41i; /* [43] */ double u51i; /* [43] */ double u21j; /* [43] */ double u31j; /* [43] */ double u41j; /* [43] */ double u51j; /* [43] */ double u21k; /* [43] */ double u31k; /* [43] */ double u41k; /* [43] */ double u51k; /* [43] */ double u21im1; /* [43] */ double u31im1; /* [43] */ double u41im1; /* [43] */ double u51im1; /* [43] */ double u21jm1; /* [43] */ double u31jm1; /* [43] */ double u41jm1; /* [43] */ double u51jm1; /* [43] */ double u21km1; /* [43] */ double u31km1; /* [43] */ double u41km1; /* [43] */ double u51km1; /* [43] */ dsspm = dssp; /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (i = 0; i < nx; i++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (j = 0; j < ny; j++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (k = 0; k < nz; k++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (m = 0; m < 5; m++) { /* [43] */ /* [43] */ frct[i][j][k][m] = 0.0; } } } } /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (i = 0; i < nx; i++) { /* [43] */ /* [43] */ iglob = i; /* [43] */ xi = ((double)iglob) / (nx0 - 1); /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (j = 0; j < ny; j++) { /* [43] */ /* [43] */ jglob = j; /* [43] */ eta = ((double)jglob) / (ny0 - 1); /* [43] */ /* [43] */ 
/* [43] */ /* [43] */ for (k = 0; k < nz; k++) { /* [43] */ /* [43] */ zeta = ((double)k) / (nz - 1); /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (m = 0; m < 5; m++) { /* [43] */ /* [43] */ rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /* [43] */ // /* [43] */ /* [44] */ L1 = 0; /* [44] */ L2 = nx - 1; /* [44] */ /* [44] */ /* [44] */ /* [44] */ for (i = L1; i <= L2; i++) { /* [44] */ /* [44] */ /* [44] */ /* [44] */ /* [44] */ for (j = jst; j <= jend; j++) { /* [44] */ /* [44] */ /* [44] */ /* [44] */ /* [44] */ for (k = 1; k < nz - 1; k++) { /* [44] */ /* [44] */ flux[i][j][k][0] = rsd[i][j][k][1]; /* [44] */ u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; /* [44] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [44] */ flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [44] */ flux[i][j][k][2] = rsd[i][j][k][2] * u21; /* [44] */ flux[i][j][k][3] = rsd[i][j][k][3] * u21; /* [44] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [44] */ // /* [44] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (j = jst; j <= jend; j++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (k = 1; k <= nz - 2; k++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= iend; i++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= L2; i++) { /* [45] */ /* [45] */ tmp = 1.0 / rsd[i][j][k][0]; /* 
[45] */ u21i = tmp * rsd[i][j][k][1]; /* [45] */ u31i = tmp * rsd[i][j][k][2]; /* [45] */ u41i = tmp * rsd[i][j][k][3]; /* [45] */ u51i = tmp * rsd[i][j][k][4]; /* [45] */ tmp = 1.0 / rsd[i - 1][j][k][0]; /* [45] */ u21im1 = tmp * rsd[i - 1][j][k][1]; /* [45] */ u31im1 = tmp * rsd[i - 1][j][k][2]; /* [45] */ u41im1 = tmp * rsd[i - 1][j][k][3]; /* [45] */ u51im1 = tmp * rsd[i - 1][j][k][4]; /* [45] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [45] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [45] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [45] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= iend; i++) { /* [45] */ /* [45] */ frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); /* [45] */ frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); /* [45] */ frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); /* [45] */ frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); /* [45] */ frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[1][j][k][m] = frct[1][j][k][m] - 
dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); /* [45] */ frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } /* [45] */ ist1 = 3; /* [45] */ iend1 = nx - 4; /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist1; i <= iend1; i++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); /* [45] */ frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /* [45] */ // /* [45] */ /* [46] */ L1 = 0; /* [46] */ L2 = ny - 1; /* [46] */ /* [46] */ /* [46] */ /* [46] */ for (i = ist; i <= iend; i++) { /* [46] */ /* [46] */ /* [46] */ /* [46] */ /* [46] */ for (j = L1; j <= L2; j++) { /* [46] */ /* [46] */ /* [46] */ /* [46] */ /* [46] */ for (k = 1; k <= nz - 2; k++) { /* [46] */ /* [46] */ flux[i][j][k][0] = rsd[i][j][k][2]; /* [46] */ u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; /* [46] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [46] */ flux[i][j][k][1] = rsd[i][j][k][1] * u31; /* [46] */ flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [46] */ flux[i][j][k][3] = rsd[i][j][k][3] * u31; /* [46] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [46] */ // /* [46] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (i = ist; i <= iend; i++) 
{ /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (k = 1; k <= nz - 2; k++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= jend; j++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= L2; j++) { /* [47] */ /* [47] */ tmp = 1.0 / rsd[i][j][k][0]; /* [47] */ u21j = tmp * rsd[i][j][k][1]; /* [47] */ u31j = tmp * rsd[i][j][k][2]; /* [47] */ u41j = tmp * rsd[i][j][k][3]; /* [47] */ u51j = tmp * rsd[i][j][k][4]; /* [47] */ tmp = 1.0 / rsd[i][j - 1][k][0]; /* [47] */ u21jm1 = tmp * rsd[i][j - 1][k][1]; /* [47] */ u31jm1 = tmp * rsd[i][j - 1][k][2]; /* [47] */ u41jm1 = tmp * rsd[i][j - 1][k][3]; /* [47] */ u51jm1 = tmp * rsd[i][j - 1][k][4]; /* [47] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [47] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [47] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [47] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= jend; j++) { /* [47] */ /* [47] */ frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); /* [47] */ frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); /* [47] */ frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); /* [47] */ frct[i][j][k][3] = 
frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); /* [47] */ frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); /* [47] */ frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } /* [47] */ jst1 = 3; /* [47] */ jend1 = ny - 4; /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst1; j <= jend1; j++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); /* [47] */ frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /* [47] */ // /* [47] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (i = ist; i <= iend; i++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (j = jst; j <= jend; j++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 0; k <= nz - 1; k++) { /* [48] */ /* [48] */ flux[i][j][k][0] = rsd[i][j][k][3]; /* [48] */ u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; /* [48] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + 
rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [48] */ flux[i][j][k][1] = rsd[i][j][k][1] * u41; /* [48] */ flux[i][j][k][2] = rsd[i][j][k][2] * u41; /* [48] */ flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [48] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 2; k++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 1; k++) { /* [48] */ /* [48] */ tmp = 1.0 / rsd[i][j][k][0]; /* [48] */ u21k = tmp * rsd[i][j][k][1]; /* [48] */ u31k = tmp * rsd[i][j][k][2]; /* [48] */ u41k = tmp * rsd[i][j][k][3]; /* [48] */ u51k = tmp * rsd[i][j][k][4]; /* [48] */ tmp = 1.0 / rsd[i][j][k - 1][0]; /* [48] */ u21km1 = tmp * rsd[i][j][k - 1][1]; /* [48] */ u31km1 = tmp * rsd[i][j][k - 1][2]; /* [48] */ u41km1 = tmp * rsd[i][j][k - 1][3]; /* [48] */ u51km1 = tmp * rsd[i][j][k - 1][4]; /* [48] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [48] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [48] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [48] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 2; k++) { /* [48] */ /* [48] */ frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); /* [48] */ frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * 
rsd[i][j][k][1] + rsd[i][j][k - 1][1]); /* [48] */ frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); /* [48] */ frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); /* [48] */ frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); /* [48] */ frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 3; k <= nz - 4; k++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); /* [48] */ frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } /* [] */ static void error() { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int m; /* [] */ int iglob; /* [] */ int jglob; /* [] */ double tmp; /* [] */ double u000ijk[5]; /* [] */ /* [] */ /* [] */ /* 
[] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ errnm[m] = 0.0; } /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ iglob = i; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ jglob = j; /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ exact(iglob, jglob, k, u000ijk); /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ tmp = (u000ijk[m] - u[i][j][k][m]); /* [] */ errnm[m] = errnm[m] + tmp * tmp; } } } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ double _imopVarPre151; /* [] */ double _imopVarPre152; /* [] */ _imopVarPre151 = errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [] */ _imopVarPre152 = sqrt(_imopVarPre151); /* [] */ /* [] */ errnm[m] = _imopVarPre152; } } /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ static void exact(int i, int j, int k, double u000ijk[5]) { /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ int m; /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ double xi; /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ double eta; /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ double zeta; /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ xi = ((double)i) / (nx0 - 1); /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ eta = ((double)j) / (ny0 - 1); /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ zeta = ((double)k) / (nz - 1); /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ for (m = 0; m < 5; m++) { /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54] */ /* [1, 2, 3, 4, 5, 6, 49, 50, 51, 
52, 53, 54] */ u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ static void jacld(int k) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ int i; /* [13, 14, 25, 41] */ int j; /* [13, 14, 25, 41] */ double r43; /* [13, 14, 25, 41] */ double c1345; /* [13, 14, 25, 41] */ double c34; /* [13, 14, 25, 41] */ double tmp1; /* [13, 14, 25, 41] */ double tmp2; /* [13, 14, 25, 41] */ double tmp3; /* [13, 14, 25, 41] */ r43 = (4.0 / 3.0); /* [13, 14, 25, 41] */ c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00; /* [13, 14, 25, 41] */ c34 = 1.00e-01 * 1.00e+00; /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (i = ist; i <= iend; i++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (j = jst; j <= jend; j++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ tmp1 = 1.0 / u[i][j][k][0]; /* [13, 14, 25, 41] */ tmp2 = tmp1 * tmp1; /* [13, 14, 25, 41] */ tmp3 = tmp1 * tmp2; /* [13, 14, 25, 41] */ d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1); /* [13, 14, 25, 41] */ d[i][j][0][1] = 0.0; /* [13, 14, 25, 41] */ d[i][j][0][2] = 0.0; /* [13, 14, 25, 41] */ d[i][j][0][3] = 0.0; /* [13, 14, 25, 41] */ d[i][j][0][4] = 0.0; /* [13, 14, 25, 41] */ d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); /* [13, 14, 25, 41] */ d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); /* [13, 14, 25, 41] */ d[i][j][1][2] = 0.0; /* [13, 14, 25, 41] 
*/ d[i][j][1][3] = 0.0; /* [13, 14, 25, 41] */ d[i][j][1][4] = 0.0; /* [13, 14, 25, 41] */ d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); /* [13, 14, 25, 41] */ d[i][j][2][1] = 0.0; /* [13, 14, 25, 41] */ d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); /* [13, 14, 25, 41] */ d[i][j][2][3] = 0.0; /* [13, 14, 25, 41] */ d[i][j][2][4] = 0.0; /* [13, 14, 25, 41] */ d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); /* [13, 14, 25, 41] */ d[i][j][3][1] = 0.0; /* [13, 14, 25, 41] */ d[i][j][3][2] = 0.0; /* [13, 14, 25, 41] */ d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); /* [13, 14, 25, 41] */ d[i][j][3][4] = 0.0; /* [13, 14, 25, 41] */ d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4])); /* [13, 14, 25, 41] */ d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); /* [13, 14, 25, 41] */ d[i][j][4][2] = 
dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); /* [13, 14, 25, 41] */ d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); /* [13, 14, 25, 41] */ d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /* [13, 14, 25, 41] */ tmp1 = 1.0 / u[i][j][k - 1][0]; /* [13, 14, 25, 41] */ tmp2 = tmp1 * tmp1; /* [13, 14, 25, 41] */ tmp3 = tmp1 * tmp2; /* [13, 14, 25, 41] */ a[i][j][0][0] = -dt * tz1 * dz1; /* [13, 14, 25, 41] */ a[i][j][0][1] = 0.0; /* [13, 14, 25, 41] */ a[i][j][0][2] = 0.0; /* [13, 14, 25, 41] */ a[i][j][0][3] = -dt * tz2; /* [13, 14, 25, 41] */ a[i][j][0][4] = 0.0; /* [13, 14, 25, 41] */ a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]); /* [13, 14, 25, 41] */ a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; /* [13, 14, 25, 41] */ a[i][j][1][2] = 0.0; /* [13, 14, 25, 41] */ a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1); /* [13, 14, 25, 41] */ a[i][j][1][4] = 0.0; /* [13, 14, 25, 41] */ a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]); /* [13, 14, 25, 41] */ a[i][j][2][1] = 0.0; /* [13, 14, 25, 41] */ a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; /* [13, 14, 25, 41] */ a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1); /* [13, 14, 25, 41] */ a[i][j][2][4] = 0.0; /* [13, 14, 25, 41] */ a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * 
u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]); /* [13, 14, 25, 41] */ a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1)); /* [13, 14, 25, 41] */ a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1)); /* [13, 14, 25, 41] */ a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; /* [13, 14, 25, 41] */ a[i][j][3][4] = -dt * tz2 * 0.40e+00; /* [13, 14, 25, 41] */ a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]); /* [13, 14, 25, 41] */ a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1]; /* [13, 14, 25, 41] */ a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2]; /* [13, 14, 25, 41] */ a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3]; /* [13, 14, 25, 41] */ a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /* [13, 14, 25, 41] */ tmp1 = 1.0 / u[i][j - 1][k][0]; /* [13, 14, 25, 41] */ tmp2 = tmp1 * tmp1; /* [13, 14, 25, 41] */ tmp3 = tmp1 * tmp2; /* [13, 14, 25, 41] */ b[i][j][0][0] = -dt * ty1 * dy1; /* [13, 14, 25, 41] */ b[i][j][0][1] 
= 0.0; /* [13, 14, 25, 41] */ b[i][j][0][2] = -dt * ty2; /* [13, 14, 25, 41] */ b[i][j][0][3] = 0.0; /* [13, 14, 25, 41] */ b[i][j][0][4] = 0.0; /* [13, 14, 25, 41] */ b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]); /* [13, 14, 25, 41] */ b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; /* [13, 14, 25, 41] */ b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1); /* [13, 14, 25, 41] */ b[i][j][1][3] = 0.0; /* [13, 14, 25, 41] */ b[i][j][1][4] = 0.0; /* [13, 14, 25, 41] */ b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]); /* [13, 14, 25, 41] */ b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1)); /* [13, 14, 25, 41] */ b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; /* [13, 14, 25, 41] */ b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1)); /* [13, 14, 25, 41] */ b[i][j][2][4] = -dt * ty2 * 0.40e+00; /* [13, 14, 25, 41] */ b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]); /* [13, 14, 25, 41] */ b[i][j][3][1] = 0.0; /* [13, 14, 25, 41] */ b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1); /* [13, 14, 25, 41] */ b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; /* [13, 14, 25, 41] */ b[i][j][3][4] = 0.0; /* [13, 14, 25, 41] */ b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * 
ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j - 1][k][1]) * (u[i][j - 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j - 1][k][2]) * (u[i][j - 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j - 1][k][3]) * (u[i][j - 1][k][3]))) - c1345 * tmp2 * u[i][j - 1][k][4]); /* [13, 14, 25, 41] */ b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1]; /* [13, 14, 25, 41] */ b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2]; /* [13, 14, 25, 41] */ b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3]; /* [13, 14, 25, 41] */ b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /* [13, 14, 25, 41] */ tmp1 = 1.0 / u[i - 1][j][k][0]; /* [13, 14, 25, 41] */ tmp2 = tmp1 * tmp1; /* [13, 14, 25, 41] */ tmp3 = tmp1 * tmp2; /* [13, 14, 25, 41] */ c[i][j][0][0] = -dt * tx1 * dx1; /* [13, 14, 25, 41] */ c[i][j][0][1] = -dt * tx2; /* [13, 14, 25, 41] */ c[i][j][0][2] = 0.0; /* [13, 14, 25, 41] */ c[i][j][0][3] = 0.0; /* [13, 14, 25, 41] */ c[i][j][0][4] = 0.0; /* [13, 14, 25, 41] */ c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]); /* [13, 14, 25, 41] */ c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; /* [13, 14, 25, 41] */ c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1)); /* [13, 14, 25, 41] */ 
c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1)); /* [13, 14, 25, 41] */ c[i][j][1][4] = -dt * tx2 * 0.40e+00; /* [13, 14, 25, 41] */ c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]); /* [13, 14, 25, 41] */ c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1); /* [13, 14, 25, 41] */ c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; /* [13, 14, 25, 41] */ c[i][j][2][3] = 0.0; /* [13, 14, 25, 41] */ c[i][j][2][4] = 0.0; /* [13, 14, 25, 41] */ c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]); /* [13, 14, 25, 41] */ c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1); /* [13, 14, 25, 41] */ c[i][j][3][2] = 0.0; /* [13, 14, 25, 41] */ c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; /* [13, 14, 25, 41] */ c[i][j][3][4] = 0.0; /* [13, 14, 25, 41] */ c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i - 1][j][k][1]) * (u[i - 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][2]) * (u[i - 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][3]) * (u[i - 1][j][k][3]))) - c1345 * tmp2 * u[i - 1][j][k][4]); /* [13, 14, 25, 41] */ c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1]; /* [13, 14, 25, 41] */ c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 
1][j][k][2];
            c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
            c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
        }
    }
}

/*
 * jacu: for the fixed plane k, assemble the 5x5 Jacobian blocks used by
 * the upper-triangular sweep: d is built from u[i][j][k] (the diagonal
 * block), a from u[i + 1][j][k], b from u[i][j + 1][k] and c from
 * u[i][j][k + 1].  Reads the globals u, dt, tx1, tx2, ty1, ty2, tz1,
 * tz2, the dx1..dx5, dy1..dy5, dz1..dz5 coefficients and the loop
 * bounds ist, iend, jst, jend; writes the globals a, b, c, d.  Both
 * loops run backwards (iend down to ist, jend down to jst) -
 * presumably to match the backward sweep that consumes these blocks;
 * confirm against the caller.
 */
static void jacu(int k) {
    int i;
    int j;
    double r43;
    double c1345;
    double c34;
    double tmp1;
    double tmp2;
    double tmp3;
    r43 = (4.0 / 3.0);
    c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
    c34 = 1.00e-01 * 1.00e+00;
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            /* d block: built from u at (i,j,k); tmp1..tmp3 are 1/rho powers */
            tmp1 = 1.0 / u[i][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
            d[i][j][0][1] = 0.0;
            d[i][j][0][2] = 0.0;
            d[i][j][0][3] = 0.0;
            d[i][j][0][4] = 0.0;
            d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
            d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
            d[i][j][1][2] = 0.0;
            d[i][j][1][3] = 0.0;
            d[i][j][1][4] = 0.0;
            d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
            d[i][j][2][1] = 0.0;
            d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
            d[i][j][2][3] = 0.0;
            d[i][j][2][4] = 0.0;
            d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
            d[i][j][3][1] = 0.0;
            d[i][j][3][2] = 0.0;
            d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
            d[i][j][3][4] = 0.0;
            d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
            d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
            d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
            d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
            d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
            /* a block: built from the (i+1,j,k) neighbour */
            tmp1 = 1.0 / u[i + 1][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            a[i][j][0][0] = -dt * tx1 * dx1;
            a[i][j][0][1] = dt * tx2;
            a[i][j][0][2] = 0.0;
            a[i][j][0][3] = 0.0;
            a[i][j][0][4] = 0.0;
            a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]);
            a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
            a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1));
            a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1));
            a[i][j][1][4] = dt * tx2 * 0.40e+00;
            a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]);
            a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1);
            a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
            a[i][j][2][3] = 0.0;
            a[i][j][2][4] = 0.0;
            a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]);
            a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1);
            a[i][j][3][2] = 0.0;
            a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
            a[i][j][3][4] = 0.0;
            a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i + 1][j][k][1]) * (u[i + 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][2]) * (u[i + 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][3]) * (u[i + 1][j][k][3]))) - c1345 * tmp2 * u[i + 1][j][k][4]);
            a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1];
            a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2];
            a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3];
            a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
            /* b block: built from the (i,j+1,k) neighbour */
            tmp1 = 1.0 / u[i][j + 1][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            b[i][j][0][0] = -dt * ty1 * dy1;
            b[i][j][0][1] = 0.0;
            b[i][j][0][2] = dt * ty2;
            b[i][j][0][3] = 0.0;
            b[i][j][0][4] = 0.0;
            b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]);
            b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
            b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1);
            b[i][j][1][3] = 0.0;
            b[i][j][1][4] = 0.0;
            b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]);
            b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1));
            b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
            b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1));
            b[i][j][2][4] = dt * ty2 * 0.40e+00;
            b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]);
            b[i][j][3][1] = 0.0;
            b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1);
            b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
            b[i][j][3][4] = 0.0;
            b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j + 1][k][1]) * (u[i][j + 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j + 1][k][2]) * (u[i][j + 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j + 1][k][3]) * (u[i][j + 1][k][3]))) - c1345 * tmp2 * u[i][j + 1][k][4]);
            b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1];
            b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2];
            b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3];
            b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
            /* c block: built from the (i,j,k+1) neighbour */
            tmp1 = 1.0 / u[i][j][k + 1][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            c[i][j][0][0] = -dt * tz1 * dz1;
            c[i][j][0][1] = 0.0;
            c[i][j][0][2] = 0.0;
            c[i][j][0][3] = dt * tz2;
            c[i][j][0][4] = 0.0;
            c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]);
            c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
            c[i][j][1][2] = 0.0;
            c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1);
            c[i][j][1][4] = 0.0;
            c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]);
            c[i][j][2][1] = 0.0;
            c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
            c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1);
            c[i][j][2][4] = 0.0;
            c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]);
            c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1));
            c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1));
            c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
            c[i][j][3][4] = dt * tz2 * 0.40e+00;
            c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k + 1][1]) * (u[i][j][k + 1][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k + 1][2]) * (u[i][j][k + 1][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k + 1][3]) * (u[i][j][k + 1][3]))) - c1345 * tmp2 * u[i][j][k + 1][4]);
            c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];
            c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];
            c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];
            c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
        }
    }
}

/*
 * l2norm: RMS norm of the 5 components of v over the sub-box
 * [ist..iend] x [jst..jend] x [1..nz0-2].  Per-component sums of squares
 * are divided by the interior volume (nx0-2)*(ny0-2)*(nz0-2) and then
 * square-rooted; results are returned in sum[0..4].  The scalars
 * sum0..sum4 keep the partial sums separate (apparently a remnant of an
 * OpenMP reduction pattern).  Note the ist/iend/jst/jend parameters
 * shadow the file-scope globals of the same names.
 */
static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double sum[5]) {
    int i;
    int j;
    int k;
    int m;
    double sum0 = 0.0;
    double sum1 = 0.0;
    double sum2 = 0.0;
    double sum3 = 0.0;
    double sum4 = 0.0;
    for (m = 0; m < 5; m++) {
        sum[m] = 0.0;
    }
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz0 - 2; k++) {
                sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
                sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
                sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
                sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
                sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
            }
        }
    }
    sum[0] += sum0;
    sum[1] += sum1;
    sum[2] += sum2;
    sum[3] += sum3;
    sum[4] += sum4;
    for (m = 0; m < 5; m++) {
        double _imopVarPre154;
        double _imopVarPre155;
        _imopVarPre154 = sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
        _imopVarPre155 = sqrt(_imopVarPre154);
        sum[m] = _imopVarPre155;
    }
}

/*
 * pintgr: surface integral of the quantity
 * 0.4 * (u4 - 0.5 * (u1^2 + u2^2 + u3^2) / u0) over three boundary
 * faces of the box (the k = ki1 / k = ki2 planes and the j- and
 * i-boundary faces); the partial sums frc1, frc2, frc3 are combined
 * into the global result frc = 0.25 * (frc1 + frc2 + frc3).
 */
static void pintgr() {
    int i;
    int j;
    int k;
    int ibeg;
    int ifin;
    int ifin1;
    int jbeg;
    int jfin;
    int jfin1;
    int iglob;
    int iglob1;
    int iglob2;
    int jglob;
    int jglob1;
    int jglob2;
    double phi1[12 + 2][12 + 2];
    double phi2[12 + 2][12 + 2];
    double frc1;
    double frc2;
    double frc3;
    /*
     * Clamp the i-window [ibeg..ifin] of the integration region against
     * ii1/ii2.  NOTE(review): the _imopVarPre temporaries are mechanically
     * expanded short-circuit && conditions left by a source transformer.
     */
    ibeg = nx;
    ifin = 0;
    iglob1 = -1;
    iglob2 = nx - 1;
    int _imopVarPre157;
    _imopVarPre157 = iglob1 >= ii1;
    if (_imopVarPre157) {
        _imopVarPre157 = iglob2 < ii2 + nx;
    }
    if (_imopVarPre157) {
        ibeg = 0;
    }
    int _imopVarPre159;
    _imopVarPre159 = iglob1 >= ii1 - nx;
    if (_imopVarPre159) {
        _imopVarPre159 = iglob2 <= ii2;
    }
    if (_imopVarPre159) {
        ifin = nx;
    }
    int _imopVarPre161;
    _imopVarPre161 = ii1 >= iglob1;
    if (_imopVarPre161) {
        _imopVarPre161 = ii1 <= iglob2;
    }
    if (_imopVarPre161) {
        ibeg = ii1;
    }
    int _imopVarPre163;
    _imopVarPre163 = ii2 >= iglob1;
    if (_imopVarPre163) {
        _imopVarPre163 = ii2 <= iglob2;
    }
    if (_imopVarPre163) {
        ifin = ii2;
    }
    /* Clamp the j-window [jbeg..jfin] against ji1/ji2 the same way. */
    jbeg = ny;
    jfin = -1;
    jglob1 = 0;
    jglob2 = ny - 1;
    int _imopVarPre165;
    _imopVarPre165 = jglob1 >= ji1;
    if (_imopVarPre165) {
        _imopVarPre165 = jglob2 < ji2 + ny;
    }
    if (_imopVarPre165) {
        jbeg = 0;
    }
    int _imopVarPre167;
    _imopVarPre167 = jglob1 > ji1 - ny;
    if (_imopVarPre167) {
        _imopVarPre167 = jglob2 <= ji2;
    }
    if (_imopVarPre167) {
        jfin = ny;
    }
    int _imopVarPre169;
    _imopVarPre169 = ji1 >= jglob1;
    if (_imopVarPre169) {
        _imopVarPre169 = ji1 <= jglob2;
    }
    if (_imopVarPre169) {
        jbeg = ji1;
    }
    int _imopVarPre171;
    _imopVarPre171 = ji2 >= jglob1;
    if (_imopVarPre171) {
        _imopVarPre171 = ji2 <= jglob2;
    }
    if (_imopVarPre171) {
        jfin = ji2;
    }
    /* The cell loops below run over [ibeg..ifin1] x [jbeg..jfin1];
       drop the last cell when the window reaches ii2 / ji2. */
    ifin1 = ifin;
    jfin1 = jfin;
    if (ifin1 == ii2) {
        ifin1 = ifin - 1;
    }
    if (jfin1 == ji2) {
        jfin1 = jfin - 1;
    }
    /* Clear the 14x14 work planes. */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    /* phi1, phi2: integrand sampled on the k = ki1 and k = ki2 planes. */
    for (i = ibeg; i <= ifin; i++) {
        iglob = i;
        for (j = jbeg; j <= jfin; j++) {
            jglob = j;
            k = ki1;
            phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]);
            k = ki2;
            phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]);
        }
    }
    /* frc1: sum the four cell corners on both k planes; the trapezoid
       factor 0.25 is applied once at the end, in frc. */
    frc1 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (j = jbeg; j <= jfin1; j++) {
            frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);
        }
    }
    frc1 = dxi * deta * frc1;
    /* frc2: same construction on the j = jbeg and j = jfin faces. */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    jglob = jbeg;
    if (jglob == ji1) {
        for (i = ibeg; i <= ifin; i++) {
            iglob = i;
            for (k = ki1; k <= ki2; k++) {
                phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (((u[i][jbeg][k][1]) * (u[i][jbeg][k][1])) + ((u[i][jbeg][k][2]) * (u[i][jbeg][k][2])) + ((u[i][jbeg][k][3]) * (u[i][jbeg][k][3]))) / u[i][jbeg][k][0]);
            }
        }
    }
    jglob = jfin;
    if (jglob == ji2) {
        for (i = ibeg; i <= ifin; i++) {
            iglob = i;
            for (k = ki1; k <= ki2; k++) {
                phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (((u[i][jfin][k][1]) * (u[i][jfin][k][1])) + ((u[i][jfin][k][2]) * (u[i][jfin][k][2])) + ((u[i][jfin][k][3]) * (u[i][jfin][k][3]))) / u[i][jfin][k][0]);
            }
        }
    }
    frc2 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (k = ki1; k <= ki2 - 1; k++) {
            frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);
        }
    }
    frc2 = dxi * dzeta * frc2;
    /* frc3: and on the i = ibeg / i = ifin faces. */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    iglob = ibeg;
    if (iglob == ii1) {
        for (j = jbeg; j <= jfin; j++) {
            jglob = j;
            for (k = ki1; k <= ki2; k++) {
                phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (((u[ibeg][j][k][1]) * (u[ibeg][j][k][1])) + ((u[ibeg][j][k][2]) * (u[ibeg][j][k][2])) + ((u[ibeg][j][k][3]) * (u[ibeg][j][k][3]))) / u[ibeg][j][k][0]);
            }
        }
    }
    iglob = ifin;
    if (iglob == ii2)
{ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin; j++) { /* [] */ /* [] */ jglob = j; /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2; k++) { /* [] */ /* [] */ phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (((u[ifin][j][k][1]) * (u[ifin][j][k][1])) + ((u[ifin][j][k][2]) * (u[ifin][j][k][2])) + ((u[ifin][j][k][3]) * (u[ifin][j][k][3]))) / u[ifin][j][k][0]); } } } /* [] */ frc3 = 0.0; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2 - 1; k++) { /* [] */ /* [] */ frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } /* [] */ frc3 = deta * dzeta * frc3; /* [] */ frc = 0.25 * (frc1 + frc2 + frc3); } /* [] */ static void read_input() { /* [] */ /* [] */ FILE *fp; /* [] */ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - LU Benchmark\n\n"); /* [] */ /* [] */ fp = fopen("inputlu.data", "r"); /* [] */ /* [] */ /* [] */ if (fp != ((void *)0)) { /* [] */ /* [] */ printf(" Reading from input file inputlu.data\n"); /* [] */ /* [] */ int _imopVarPre173; /* [] */ _imopVarPre173 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre173 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre173 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre175; /* [] */ _imopVarPre175 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre175 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre175 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre178; /* [] */ int *_imopVarPre179; /* [] */ _imopVarPre178 = &inorm; /* [] */ _imopVarPre179 = &ipr; /* [] */ fscanf(fp, "%d%d", _imopVarPre179, _imopVarPre178); /* [] */ /* [] */ int _imopVarPre181; /* [] */ _imopVarPre181 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre181 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre181 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre183; /* [] */ 
_imopVarPre183 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre183 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre183 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre185; /* [] */ _imopVarPre185 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre185 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre185 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre187; /* [] */ _imopVarPre187 = &itmax; /* [] */ fscanf(fp, "%d", _imopVarPre187); /* [] */ /* [] */ int _imopVarPre189; /* [] */ _imopVarPre189 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre189 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre189 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre191; /* [] */ _imopVarPre191 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre191 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre191 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre193; /* [] */ _imopVarPre193 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre193 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre193 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre195; /* [] */ _imopVarPre195 = &dt; /* [] */ fscanf(fp, "%lf", _imopVarPre195); /* [] */ /* [] */ int _imopVarPre197; /* [] */ _imopVarPre197 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre197 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre197 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre199; /* [] */ _imopVarPre199 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre199 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre199 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre201; /* [] */ _imopVarPre201 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre201 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre201 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre203; /* [] */ _imopVarPre203 = &omega; /* [] */ fscanf(fp, "%lf", _imopVarPre203); /* [] */ /* [] */ int _imopVarPre205; /* [] */ _imopVarPre205 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre205 != '\n') { /* [] 
*/ /* [] */ ; /* [] */ _imopVarPre205 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre207; /* [] */ _imopVarPre207 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre207 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre207 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre209; /* [] */ _imopVarPre209 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre209 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre209 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre215; /* [] */ double *_imopVarPre216; /* [] */ double *_imopVarPre217; /* [] */ double *_imopVarPre218; /* [] */ double *_imopVarPre219; /* [] */ _imopVarPre215 = &tolrsd[4]; /* [] */ _imopVarPre216 = &tolrsd[3]; /* [] */ _imopVarPre217 = &tolrsd[2]; /* [] */ _imopVarPre218 = &tolrsd[1]; /* [] */ _imopVarPre219 = &tolrsd[0]; /* [] */ fscanf(fp, "%lf%lf%lf%lf%lf", _imopVarPre219, _imopVarPre218, _imopVarPre217, _imopVarPre216, _imopVarPre215); /* [] */ /* [] */ int _imopVarPre221; /* [] */ _imopVarPre221 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre221 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre221 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre223; /* [] */ _imopVarPre223 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre223 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre223 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre225; /* [] */ _imopVarPre225 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre225 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre225 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre229; /* [] */ int *_imopVarPre230; /* [] */ int *_imopVarPre231; /* [] */ _imopVarPre229 = &nz0; /* [] */ _imopVarPre230 = &ny0; /* [] */ _imopVarPre231 = &nx0; /* [] */ fscanf(fp, "%d%d%d", _imopVarPre231, _imopVarPre230, _imopVarPre229); /* [] */ /* [] */ int _imopVarPre233; /* [] */ _imopVarPre233 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre233 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre233 = fgetc(fp); /* [] */ } /* [] */ 
/*
 * Tail of the input-reading routine: when no parameter file was parsed
 * above, fall back to the built-in defaults (50 iterations, dt = 0.5,
 * omega = 1.2, 12x12x12 grid), then validate the requested grid size
 * against the compiled limits.  The empty-bracket and numbered-bracket
 * markers throughout this file are phase annotations emitted by an
 * instrumentation pass; they carry no semantics.
 */
fclose(fp); /* [] */ } else { /* [] */ /* [] */ ipr = 1; /* [] */ inorm = 50; /* [] */ itmax = 50; /* [] */ dt = 0.5; /* [] */ omega = 1.2; /* [] */ tolrsd[0] = 1.0e-8; /* [] */ tolrsd[1] = 1.0e-8; /* [] */ tolrsd[2] = 1.0e-8; /* [] */ tolrsd[3] = 1.0e-8; /* [] */ tolrsd[4] = 1.0e-8; /* [] */ nx0 = 12; /* [] */ ny0 = 12; /* [] */ nz0 = 12; } /* [] */ int _imopVarPre234; /* [] */ int _imopVarPre235; /* [] */ _imopVarPre234 = nx0 < 4; /* [] */ /* [] */ if (!_imopVarPre234) { /* [] */ /* [] */ _imopVarPre235 = ny0 < 4; /* [] */ /* [] */ if (!_imopVarPre235) { /* [] */ /* [] */ _imopVarPre235 = nz0 < 4; } /* [] */ _imopVarPre234 = _imopVarPre235; } /* [] */ /* [] */ if (_imopVarPre234) { /* [] */ /* [] */ printf(" PROBLEM SIZE IS TOO SMALL - \n" " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ int _imopVarPre236; /* [] */ int _imopVarPre237; /* [] */ _imopVarPre236 = nx0 > 12; /* [] */ /* [] */ if (!_imopVarPre236) { /* [] */ /* [] */ _imopVarPre237 = ny0 > 12; /* [] */ /* [] */ if (!_imopVarPre237) { /* [] */ /* [] */ _imopVarPre237 = nz0 > 12; } /* [] */ _imopVarPre236 = _imopVarPre237; } /* [] */ /* [] */ if (_imopVarPre236) { /* [] */ /* [] */ printf(" PROBLEM SIZE IS TOO LARGE - \n" " NX, NY AND NZ SHOULD BE EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0); /* [] */ /* [] */ printf(" Iterations: %3d\n", itmax); /* [] */ }
/*
 * rhs(): compute the steady-state residual rsd of the discretized
 * equations.  rsd starts as -frct (the forcing term); then, for each
 * coordinate direction in turn, the code accumulates second-order
 * central differences of the convective fluxes (tx2/ty2/tz2 terms),
 * viscous flux differences (tx3/ty3/tz3 terms), and a fourth-order
 * artificial-dissipation stencil scaled by dssp.
 * NOTE(review): structure matches the NPB "LU" pseudo-application's
 * rhs routine -- confirm against the reference source.
 */
/* [] */ static void rhs() { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int m; /* [] */ int L1; /* [] */ int L2; /* [] */ int ist1; /* [] */ int iend1; /* [] */ int jst1; /* [] */ int jend1; /* [] */ double q; /* [] */ double u21; /* [] */ double u31; /* [] */ double u41; /* [] */ double tmp; /* [] */ double u21i; /* [] */ double u31i; /* [] */ double u41i; /* [] */ double u51i; /* [] */ double u21j; /* [] */ double u31j; /* [] */ double u41j; /* [] */ double u51j; /* [] */
/* Scratch copies of u at neighbouring grid points for the stencils. */
double u21k; /* [] */ double u31k; /* [] */ double u41k; /* [] */ double u51k; /* [] */ double u21im1; /* [] */ double u31im1; /* [] */ double u41im1; /* [] */ double u51im1; /* [] */ double u21jm1; /* [] */ double u31jm1; /* [] */ double u41jm1; /* [] */ double u51jm1; /* [] */ double u21km1; /* [] */ double u31km1; /* [] */ double u41km1; /* [] */ double u51km1; /* [] */ /* [] */ /* [] */ /* [] */ for (i = 0; i <= nx - 1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = 0; j <= ny - 1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= nz - 1; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /* [] */ L1 = 0; /* [] */ L2 = nx - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (i = L1; i <= L2; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][1]; /* [] */ u21 = u[i][j][k][1] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u21; /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u21; /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [] */ L2 = nx - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= L2; i++) { /* [] */ /* [] */ tmp = 1.0 /
u[i][j][k][0]; /* [] */ u21i = tmp * u[i][j][k][1]; /* [] */ u31i = tmp * u[i][j][k][2]; /* [] */ u41i = tmp * u[i][j][k][3]; /* [] */ u51i = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i - 1][j][k][0]; /* [] */ u21im1 = tmp * u[i - 1][j][k][1]; /* [] */ u31im1 = tmp * u[i - 1][j][k][2]; /* [] */ u41im1 = tmp * u[i - 1][j][k][3]; /* [] */ u51im1 = tmp * u[i - 1][j][k][4]; /* [] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); /* [] */
/* Second one-sided boundary row (i = 2) of the x-direction dissipation. */
rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } /* [] */ ist1 = 3; /* [] */ iend1 = nx - 4; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist1; i <= iend1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); /* [] */ rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /* [] */ L1 = 0; /* [] */ L2 = ny - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = L1; j <= L2; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][2]; /* [] */ u31 = u[i][j][k][2] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u31; /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u31; /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j +
1][k][m] - flux[i][j - 1][k][m]); } } /* [] */ L2 = ny - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= L2; j++) { /* [] */ /* [] */ tmp = 1.0 / u[i][j][k][0]; /* [] */ u21j = tmp * u[i][j][k][1]; /* [] */ u31j = tmp * u[i][j][k][2]; /* [] */ u41j = tmp * u[i][j][k][3]; /* [] */ u51j = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i][j - 1][k][0]; /* [] */ u21jm1 = tmp * u[i][j - 1][k][1]; /* [] */ u31jm1 = tmp * u[i][j - 1][k][2]; /* [] */ u41jm1 = tmp * u[i][j - 1][k][3]; /* [] */ u51jm1 = tmp * u[i][j - 1][k][4]; /* [] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m =
0; m < 5; m++) { /* [] */ /* [] */ rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); /* [] */ rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } /* [] */ jst1 = 3; /* [] */ jend1 = ny - 4; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst1; j <= jend1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); /* [] */ rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= nz - 1; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][3]; /* [] */ u41 = u[i][j][k][3] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u41; /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u41; /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [] */ /* []
*/ /* [] */ /* [] */ for (k = 1; k <= nz - 1; k++) { /* [] */ /* [] */ tmp = 1.0 / u[i][j][k][0]; /* [] */ u21k = tmp * u[i][j][k][1]; /* [] */ u31k = tmp * u[i][j][k][2]; /* [] */ u41k = tmp * u[i][j][k][3]; /* [] */ u51k = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i][j][k - 1][0]; /* [] */ u21km1 = tmp * u[i][j][k - 1][1]; /* [] */ u31km1 = tmp * u[i][j][k - 1][2]; /* [] */ u41km1 = tmp * u[i][j][k - 1][3]; /* [] */ u51km1 = tmp * u[i][j][k - 1][4]; /* [] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][1][m] = rsd[i][j][1][m] -
/*
 * Tail of rhs(): z-direction fourth-order dissipation -- the boundary
 * rows k = 1, 2 (statement begun on the previous line), the interior
 * five-point stencil for k = 3 .. nz-4, and the one-sided rows
 * k = nz-3, nz-2.
 */
dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); /* [] */ rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 3; k <= nz - 4; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); /* [] */ rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } }
/*
 * setbv(): impose boundary values of the dependent variables u on the
 * six faces of the grid from the exact-solution routine exact().  The
 * loops below cover the faces k = 0, k = nz-1 and j = 0; the remaining
 * faces follow on the next source line.
 * NOTE(review): the stray "//" tokens in this region look like line
 * comments whose terminating newlines were lost when the file was
 * reflowed by the instrumentation/extraction step -- verify against the
 * originally formatted source before recompiling this text as-is.
 */
/* [] */ static void setbv() { /* [] */ /* [49] */ /* [49] */ /* [49] */ int i; /* [49] */ int j; /* [49] */ int k; /* [49] */ int iglob; /* [49] */ int jglob; /* [49] */ /* [49] */ /* [49] */ /* [49] */ for (i = 0; i < nx; i++) { /* [49] */ /* [49] */ iglob = i; /* [49] */ /* [49] */ /* [49] */ /* [49] */ for (j = 0; j < ny; j++) { /* [49] */ /* [49] */ jglob = j; /* [49] */ double *_imopVarPre239; /* [49] */ _imopVarPre239 = &u[i][j][0][0]; /* [49] */ exact(iglob, jglob, 0, _imopVarPre239); /* [49] */ /* [49] */ double *_imopVarPre242; /* [49] */ int _imopVarPre243; /* [49] */ _imopVarPre242 = &u[i][j][nz - 1][0]; /* [49] */ _imopVarPre243 = nz - 1; /* [49] */ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /* [49] */ } } /* [49] */ // /* [49] */ /* [50] */ /* [50] */ /* [50] */ /* [50] */ for (i = 0; i < nx; i++) { /* [50] */ /* [50] */ iglob = i; /* [50] */ /* [50] */ /* [50] */ /* [50] */ for (k = 0; k < nz; k++) { /* [50] */ /* [50] */ double *_imopVarPre245; /* [50] */ _imopVarPre245 = &u[i][0][k][0]; /* [50] */ exact(iglob, 0, k, _imopVarPre245); /* [50] */ } }
/*
 * Faces j = ny-1, i = 0 and i = nx-1 of setbv() (note: the exact()
 * calls use the global extents ny0-1 / nx0-1 as arguments while
 * indexing u with ny-1 / nx-1), followed by the start of setcoeff(),
 * which derives the mesh spacings (dxi, deta, dzeta), the
 * difference-scheme factors t{x,y,z}{1,2,3}, the index bounds
 * ii/ji/ki, and the x/y diffusion coefficients.
 */
/* [50] */ // /* [50] */ /* [51] */ /* [51] */ /* [51] */ /* [51] */ for (i = 0; i < nx; i++) { /* [51] */ /* [51] */ iglob = i; /* [51] */ /* [51] */ /* [51] */ /* [51] */ for (k = 0; k < nz; k++) { /* [51] */ /* [51] */ double *_imopVarPre248; /* [51] */ int _imopVarPre249; /* [51] */ _imopVarPre248 = &u[i][ny - 1][k][0]; /* [51] */ _imopVarPre249 = ny0 - 1; /* [51] */ exact(iglob, _imopVarPre249, k, _imopVarPre248); /* [51] */ } } /* [51] */ // /* [51] */ /* [52] */ /* [52] */ /* [52] */ /* [52] */ for (j = 0; j < ny; j++) { /* [52] */ /* [52] */ jglob = j; /* [52] */ /* [52] */ /* [52] */ /* [52] */ for (k = 0; k < nz; k++) { /* [52] */ /* [52] */ double *_imopVarPre251; /* [52] */ _imopVarPre251 = &u[0][j][k][0]; /* [52] */ exact(0, jglob, k, _imopVarPre251); /* [52] */ } } /* [52] */ // /* [52] */ /* [53] */ /* [53] */ /* [53] */ /* [53] */ for (j = 0; j < ny; j++) { /* [53] */ /* [53] */ jglob = j; /* [53] */ /* [53] */ /* [53] */ /* [53] */ for (k = 0; k < nz; k++) { /* [53] */ /* [53] */ double *_imopVarPre254; /* [53] */ int _imopVarPre255; /* [53] */ _imopVarPre254 = &u[nx - 1][j][k][0]; /* [53] */ _imopVarPre255 = nx0 - 1; /* [53] */ exact(_imopVarPre255, jglob, k, _imopVarPre254); /* [53] */ } } } /* [] */ static void setcoeff() { /* [] */ /* [] */ dxi = 1.0 / (nx0 - 1); /* [] */ deta = 1.0 / (ny0 - 1); /* [] */ dzeta = 1.0 / (nz0 - 1); /* [] */ tx1 = 1.0 / (dxi * dxi); /* [] */ tx2 = 1.0 / (2.0 * dxi); /* [] */ tx3 = 1.0 / dxi; /* [] */ ty1 = 1.0 / (deta * deta); /* [] */ ty2 = 1.0 / (2.0 * deta); /* [] */ ty3 = 1.0 / deta; /* [] */ tz1 = 1.0 / (dzeta * dzeta); /* [] */ tz2 = 1.0 / (2.0 * dzeta); /* [] */ tz3 = 1.0 / dzeta; /* [] */ ii1 = 1; /* [] */ ii2 = nx0 - 2; /* [] */ ji1 = 1; /* [] */ ji2 = ny0 - 3; /* [] */ ki1 = 2; /* [] */ ki2 = nz0 - 2; /* [] */ dx1 = 0.75; /* [] */ dx2 = dx1; /* [] */ dx3 = dx1; /* [] */ dx4 = dx1; /* [] */ dx5 = dx1; /* [] */ dy1 = 0.75; /* [] */ dy2 = dy1; /* [] */ dy3 = dy1; /* [] */ dy4 = dy1; /* [] */ dy5 = dy1; /* [] 
*/
/*
 * setcoeff() continued: z-direction diffusion coefficients, then the
 * artificial-dissipation constant dssp = max(dx1, dy1, dz1) / 4 -- the
 * max is written out as explicit conditionals by the instrumentation
 * pass -- and the first rows of the coefficient table ce[][].
 */
dz1 = 1.00; /* [] */ dz2 = dz1; /* [] */ dz3 = dz1; /* [] */ dz4 = dz1; /* [] */ dz5 = dz1; /* [] */ int _imopVarPre348; /* [] */ double _imopVarPre349; /* [] */ int _imopVarPre350; /* [] */ double _imopVarPre351; /* [] */ int _imopVarPre358; /* [] */ double _imopVarPre359; /* [] */ _imopVarPre348 = (dy1 > dz1); /* [] */ /* [] */ if (_imopVarPre348) { /* [] */ /* [] */ _imopVarPre349 = dy1; } else { /* [] */ /* [] */ _imopVarPre349 = dz1; } /* [] */ _imopVarPre350 = (dx1 > _imopVarPre349); /* [] */ /* [] */ if (_imopVarPre350) { /* [] */ /* [] */ _imopVarPre351 = dx1; } else { /* [] */ /* [] */ _imopVarPre358 = (dy1 > dz1); /* [] */ /* [] */ if (_imopVarPre358) { /* [] */ /* [] */ _imopVarPre359 = dy1; } else { /* [] */ /* [] */ _imopVarPre359 = dz1; } /* [] */ _imopVarPre351 = _imopVarPre359; } /* [] */ dssp = _imopVarPre351 / 4.0; /* [] */
/* ce[m][0..12]: coefficient table, presumably consumed by the exact-solution routine exact() (defined elsewhere in this file) -- verify there. */
ce[0][0] = 2.0; /* [] */ ce[0][1] = 0.0; /* [] */ ce[0][2] = 0.0; /* [] */ ce[0][3] = 4.0; /* [] */ ce[0][4] = 5.0; /* [] */ ce[0][5] = 3.0; /* [] */ ce[0][6] = 5.0e-01; /* [] */ ce[0][7] = 2.0e-02; /* [] */ ce[0][8] = 1.0e-02; /* [] */ ce[0][9] = 3.0e-02; /* [] */ ce[0][10] = 5.0e-01; /* [] */ ce[0][11] = 4.0e-01; /* [] */ ce[0][12] = 3.0e-01; /* [] */ ce[1][0] = 1.0; /* [] */ ce[1][1] = 0.0; /* [] */ ce[1][2] = 0.0; /* [] */ ce[1][3] = 0.0; /* [] */ ce[1][4] = 1.0; /* [] */ ce[1][5] = 2.0; /* [] */ ce[1][6] = 3.0; /* [] */ ce[1][7] = 1.0e-02; /* [] */ ce[1][8] = 3.0e-02; /* [] */ ce[1][9] = 2.0e-02; /* [] */ ce[1][10] = 4.0e-01; /* [] */ ce[1][11] = 3.0e-01; /* [] */ ce[1][12] = 5.0e-01; /* [] */ ce[2][0] = 2.0; /* [] */ ce[2][1] = 2.0; /* [] */ ce[2][2] = 0.0; /* [] */ ce[2][3] = 0.0; /* [] */ ce[2][4] = 0.0; /* [] */ ce[2][5] = 2.0; /* [] */ ce[2][6] = 3.0; /* [] */ ce[2][7] = 4.0e-02; /* [] */ ce[2][8] = 3.0e-02; /* [] */ ce[2][9] = 5.0e-02; /* [] */ ce[2][10] = 3.0e-01; /* [] */ ce[2][11] = 5.0e-01; /* [] */ ce[2][12] = 4.0e-01; /* [] */ ce[3][0] = 2.0; /* [] */ ce[3][1] = 2.0; /* [] */ ce[3][2] = 0.0; /* [] */
/* Tail of setcoeff(): remaining rows (equations 3 and 4) of the ce coefficient table. */
ce[3][3] = 0.0; /* [] */ ce[3][4] = 0.0; /* [] */ ce[3][5] = 2.0; /* [] */ ce[3][6] = 3.0; /* [] */ ce[3][7] = 3.0e-02; /* [] */ ce[3][8] = 5.0e-02; /* [] */ ce[3][9] = 4.0e-02; /* [] */ ce[3][10] = 2.0e-01; /* [] */ ce[3][11] = 1.0e-01; /* [] */ ce[3][12] = 3.0e-01; /* [] */ ce[4][0] = 5.0; /* [] */ ce[4][1] = 4.0; /* [] */ ce[4][2] = 3.0; /* [] */ ce[4][3] = 2.0; /* [] */ ce[4][4] = 1.0e-01; /* [] */ ce[4][5] = 4.0e-01; /* [] */ ce[4][6] = 3.0e-01; /* [] */ ce[4][7] = 5.0e-02; /* [] */ ce[4][8] = 4.0e-02; /* [] */ ce[4][9] = 3.0e-02; /* [] */ ce[4][10] = 1.0e-01; /* [] */ ce[4][11] = 3.0e-01; /* [] */ ce[4][12] = 2.0e-01; }
/*
 * setiv(): initialize the interior of u by blending the exact solution
 * on the six boundary faces (the ue_* arrays filled by exact()) with
 * linear weights xi, eta, zeta -- a transfinite-interpolation formula.
 * Points on the j and i boundaries are skipped via the jglob/iglob
 * guard conditions.
 */
/* [] */ static void setiv() { /* [] */ /* [54] */ /* [54] */ /* [54] */ int i; /* [54] */ int j; /* [54] */ int k; /* [54] */ int m; /* [54] */ int iglob; /* [54] */ int jglob; /* [54] */ double xi; /* [54] */ double eta; /* [54] */ double zeta; /* [54] */ double pxi; /* [54] */ double peta; /* [54] */ double pzeta; /* [54] */ double ue_1jk[5]; /* [54] */ double ue_nx0jk[5]; /* [54] */ double ue_i1k[5]; /* [54] */ double ue_iny0k[5]; /* [54] */ double ue_ij1[5]; /* [54] */ double ue_ijnz[5]; /* [54] */ /* [54] */ /* [54] */ /* [54] */ for (j = 0; j < ny; j++) { /* [54] */ /* [54] */ jglob = j; /* [54] */ /* [54] */ /* [54] */ /* [54] */ for (k = 1; k < nz - 1; k++) { /* [54] */ /* [54] */ zeta = ((double)k) / (nz - 1); /* [54] */ int _imopVarPre361; /* [54] */ _imopVarPre361 = jglob != 0; /* [54] */ /* [54] */ if (_imopVarPre361) { /* [54] */ /* [54] */ _imopVarPre361 = jglob != ny0 - 1; } /* [54] */ /* [54] */ if (_imopVarPre361) { /* [54] */ /* [54] */ eta = ((double)jglob) / (ny0 - 1); /* [54] */ /* [54] */ /* [54] */ /* [54] */ for (i = 0; i < nx; i++) { /* [54] */ /* [54] */ iglob = i; /* [54] */ int _imopVarPre363; /* [54] */ _imopVarPre363 = iglob != 0; /* [54] */ /* [54] */ if (_imopVarPre363) { /* [54] */ /* [54] */ _imopVarPre363 = iglob != nx0 - 1; } /* [54] */ /* [54] */ if (_imopVarPre363) { /* [54] */ /* [54] */ xi =
((double)iglob) / (nx0 - 1); /* [54] */ exact(0, jglob, k, ue_1jk); /* [54] */ /* [54] */ int _imopVarPre365; /* [54] */ _imopVarPre365 = nx0 - 1; /* [54] */ exact(_imopVarPre365, jglob, k, ue_nx0jk); /* [54] */ /* [54] */ exact(iglob, 0, k, ue_i1k); /* [54] */ /* [54] */ int _imopVarPre367; /* [54] */ _imopVarPre367 = ny0 - 1; /* [54] */ exact(iglob, _imopVarPre367, k, ue_iny0k); /* [54] */ /* [54] */ exact(iglob, jglob, 0, ue_ij1); /* [54] */ /* [54] */ int _imopVarPre369; /* [54] */ _imopVarPre369 = nz - 1; /* [54] */ exact(iglob, jglob, _imopVarPre369, ue_ijnz); /* [54] */ /* [54] */ /* [54] */ /* [54] */ /* [54] */ for (m = 0; m < 5; m++) { /* [54] */ /* [54] */ pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; /* [54] */ peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; /* [54] */ pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; /* [54] */ u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } }
/*
 * ssor(): driver for the SSOR iteration (body continues beyond this
 * excerpt).  Visible here: tmp = 1 / (omega * (2 - omega)), zeroing of
 * the Jacobian blocks a, b, c, d, and the local declarations for the
 * inlined residual computation that follows.
 */
/* [] */ static void ssor() { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int m; /* [] */ int istep; /* [] */ double tmp; /* [] */ double delunm[5]; /* [] */ double tv[12][12][5]; /* [] */ tmp = 1.0 / (omega * (2.0 - omega)); /* [55] */ /* [55] */ /* [55] */ /* [55] */ /* [55] */ /* [55] */ for (i = 0; i < 12; i++) { /* [55] */ /* [55] */ /* [55] */ /* [55] */ /* [55] */ for (j = 0; j < 12; j++) { /* [55] */ /* [55] */ /* [55] */ /* [55] */ /* [55] */ for (k = 0; k < 5; k++) { /* [55] */ /* [55] */ /* [55] */ /* [55] */ /* [55] */ for (m = 0; m < 5; m++) { /* [55] */ /* [55] */ a[i][j][k][m] = 0.0; /* [55] */ b[i][j][k][m] = 0.0; /* [55] */ c[i][j][k][m] = 0.0; /* [55] */ d[i][j][k][m] = 0.0; } } } } /* [56] */ /* [56] */ /* [56] */ int i_imopVarPre84; /* [56] */ int j_imopVarPre85; /* [56] */ int k_imopVarPre86; /* [56] */ int m_imopVarPre87; /* [56] */ int L1; /* [56] */ int L2; /* [56] */ int ist1; /* [56] */ int iend1; /* [56] */ int jst1; /* [56] */ int jend1; /* [56] */ double q;
/* [56] */ double u21; /* [56] */ double u31; /* [56] */ double u41; /* [56] */ double tmp_imopVarPre88; /* [56] */ double u21i; /* [56] */ double u31i; /* [56] */ double u41i; /* [56] */ double u51i; /* [56] */ double u21j; /* [56] */ double u31j; /* [56] */ double u41j; /* [56] */ double u51j; /* [56] */ double u21k; /* [56] */ double u31k; /* [56] */ double u41k; /* [56] */ double u51k; /* [56] */ double u21im1; /* [56] */ double u31im1; /* [56] */ double u41im1; /* [56] */ double u51im1; /* [56] */ double u21jm1; /* [56] */ double u31jm1; /* [56] */ double u41jm1; /* [56] */ double u51jm1; /* [56] */ double u21km1; /* [56] */ double u31km1; /* [56] */ double u41km1; /* [56] */ double u51km1; /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [56] */ /* [56] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /* [56] */ L1 = 0; /* [56] */ L2 = nx - 1; /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [56] */ /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [56] */ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [56] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /* [56] */ // /* [56] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [57] */ L2 = nx - 1; /* [57] */ /* [57] */ /* [57] */ /* [57] */ for 
(i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [57] */ /* [57] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [57] */ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [57] */ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [57] */ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [57] */ u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [57] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /* [57] */ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /* [57] */ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2]; /* [57] */ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /* [57] */ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]); } /* [57] */ /* 
[57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [57] */ rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /* [57] */ ist1 = 3; /* [57] */ iend1 = nx - 4; /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 
4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [57] */ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /* [57] */ // /* [57] */ /* [58] */ L1 = 0; /* [58] */ L2 = ny - 1; /* [58] */ /* [58] */ /* [58] */ /* [58] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [58] */ /* [58] */ /* [58] */ /* [58] */ /* [58] */ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [58] */ /* [58] */ /* [58] */ /* [58] */ /* [58] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [58] */ /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [58] */ u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [58] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [58] */ 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /* [58] */ // /* [58] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /* [59] */ L2 = ny - 1; /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [59] */ /* [59] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [59] */ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [59] */ u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [59] */ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [59] */ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [59] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /* [59] */ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /* [59] */ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 
1][k_imopVarPre86][2]; /* [59] */ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /* [59] */ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]); } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /* [59] */ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /* [59] 
*/ jst1 = 3; /* [59] */ jend1 = ny - 4; /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /* [59] */ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /* [59] */ // /* [59] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* 
[60] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [60] */ /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [60] */ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; 
k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [60] */ /* [60] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [60] */ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [60] */ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [60] */ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [60] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /* [60] */ u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /* [60] */ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /* [60] */ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /* [60] */ u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; 
m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * 
u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } /* [61] */ /* [61] */ /* [61] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [61] */ double *sum; /* [61] */ v = rsd; /* [61] */ sum = rsdnm; /* [61] */ int i_imopVarPre75; /* [61] */ int j_imopVarPre76; /* [61] */ int k_imopVarPre77; /* [61] */ int m_imopVarPre78; /* [61] */ double sum0 = 0.0; /* [61] */ double sum1 = 0.0; /* [61] */ double sum2 = 0.0; /* [61] */ double sum3 = 0.0; /* [61] */ double sum4 = 0.0; /* [61] */ #pragma omp single nowait { /* [61] */ /* [61] */ /* [61] */ /* [61] */ /* [61] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [61] */ /* [61] */ sum[m_imopVarPre78] = 0.0; } } /* [61] */ // /* [61] */ /* [62] */ /* [62] */ /* [62] */ /* [62] */ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /* [62] */ /* [62] */ /* [62] */ /* [62] */ /* [62] */ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /* [62] */ /* [62] */ /* [62] */ /* [62] */ /* [62] */ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /* [62] */ /* [62] */ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /* [62] */ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /* [62] */ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /* [62] */ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /* [62] */ sum4 = sum4 
+ v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /* [62] */ // /* [62] */ /* [62] */ /* [62] */ sum[0] += sum0; /* [62] */ sum[1] += sum1; /* [62] */ sum[2] += sum2; /* [62] */ sum[3] += sum3; /* [62] */ sum[4] += sum4; /* [62] */ // /* [62] */ // /* [62] */ /* [63] */ /* [63] */ /* [63] */ /* [63] */ /* [63] */ /* [63] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [63] */ /* [63] */ double _imopVarPre154; /* [63] */ double _imopVarPre155; /* [63] */ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [63] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [63] */ /* [63] */ sum[m_imopVarPre78] = _imopVarPre155; } /* [] */ timer_clear(1); /* [] */ /* [] */ timer_start(1); /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (istep = 1; istep <= itmax; istep++) { /* [] */ /* [] */ int _imopVarPre372; /* [] */ int _imopVarPre370; /* [] */ int _imopVarPre371; /* [] */ _imopVarPre370 = istep % 20 == 0; /* [] */ /* [] */ if (!_imopVarPre370) { /* [] */ /* [] */ _imopVarPre371 = istep == itmax; /* [] */ /* [] */ if (!_imopVarPre371) { /* [] */ /* [] */ _imopVarPre371 = istep == 1; } /* [] */ _imopVarPre370 = _imopVarPre371; } /* [] */ /* [] */ if (_imopVarPre370) { /* [] */ /* [] */ /* [] */ /* [] */ printf(" Time step %4d\n", istep); /* [] */ } /* [64] */ /* [64] */ /* [64] */ int _imopVarPre377; /* [64] */ int _imopVarPre378; /* [64] */ int _imopVarPre379; /* [64] */ int _imopVarPre380; /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (i = ist; i <= iend; i++) { /* [64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (j = jst; j <= jend; j++) { /* [64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (k = 1; k <= nz - 2; k++) { /* [64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (m = 0; m < 5; m++) { /* [64] */ /* [64] */ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /* [64] */ // /* [64] */ /* [41] */ /* [41] */ /* [41] */ /* [41] */ for (k = 1; k 
<= nz - 2; k++) { /* [41] */ /* [41] */ jacld(k); /* [41] */ /* [41] */ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /* [41] */ } /* [41] */ // /* [41] */ /* [42] */ /* [42] */ /* [42] */ /* [42] */ for (k = nz - 2; k >= 1; k--) { /* [42] */ /* [42] */ jacu(k); /* [42] */ /* [42] */ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /* [42] */ } /* [42] */ // /* [42] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (i = ist; i <= iend; i++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (j = jst; j <= jend; j++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (k = 1; k <= nz - 2; k++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (m = 0; m < 5; m++) { /* [65] */ /* [65] */ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* [65] */ /* [65] */ if (istep % inorm == 0) { /* [65] */ /* [65] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [65] */ double *sum; /* [65] */ v = rsd; /* [65] */ sum = delunm; /* [65] */ int i_imopVarPre89; /* [65] */ int j_imopVarPre90; /* [65] */ int k_imopVarPre91; /* [65] */ int m_imopVarPre92; /* [65] */ double sum0 = 0.0; /* [65] */ double sum1 = 0.0; /* [65] */ double sum2 = 0.0; /* [65] */ double sum3 = 0.0; /* [65] */ double sum4 = 0.0; /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [65] */ /* [65] */ sum[m_imopVarPre92] = 0.0; } /* [65] */ // /* [65] */ /* [66] */ /* [66] */ /* [66] */ /* [66] */ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /* [66] */ /* [66] */ /* [66] */ /* [66] */ /* [66] */ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /* [66] */ /* [66] */ /* [66] */ /* [66] */ /* [66] */ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /* [66] */ /* [66] */ sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * 
v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /* [66] */ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /* [66] */ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /* [66] */ sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /* [66] */ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /* [66] */ // /* [66] */ /* [66] */ /* [66] */ sum[0] += sum0; /* [66] */ sum[1] += sum1; /* [66] */ sum[2] += sum2; /* [66] */ sum[3] += sum3; /* [66] */ sum[4] += sum4; /* [66] */ // /* [66] */ // /* [66] */ /* [67] */ /* [67] */ /* [67] */ /* [67] */ /* [67] */ /* [67] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [67] */ /* [67] */ double _imopVarPre154; /* [67] */ double _imopVarPre155; /* [67] */ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [67] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [67] */ /* [67] */ sum[m_imopVarPre92] = _imopVarPre155; } /* [67] */ // /* [67] */ /* [68] */ // /* [68] */ } /* [65, 69] */ // /* [65, 69] */ /* [66, 70] */ int i_imopVarPre79; /* [66, 70] */ int j_imopVarPre80; /* [66, 70] */ int k_imopVarPre81; /* [66, 70] */ int m_imopVarPre82; /* [66, 70] */ int L1; /* [66, 70] */ int L2; /* [66, 70] */ int ist1; /* [66, 70] */ int iend1; /* [66, 70] */ int jst1; /* [66, 70] */ int jend1; /* [66, 70] */ double q; /* [66, 70] */ double u21; /* [66, 70] */ double u31; /* [66, 70] */ double u41; /* [66, 70] */ double tmp_imopVarPre83; /* [66, 70] */ double u21i; /* [66, 70] */ double u31i; /* [66, 70] */ double u41i; /* [66, 70] */ double u51i; /* [66, 70] */ double u21j; /* [66, 70] */ double u31j; /* [66, 70] */ double u41j; /* [66, 70] */ double u51j; /* [66, 70] */ double u21k; /* [66, 70] 
*/ double u31k; /* [66, 70] */ double u41k; /* [66, 70] */ double u51k; /* [66, 70] */ double u21im1; /* [66, 70] */ double u31im1; /* [66, 70] */ double u41im1; /* [66, 70] */ double u51im1; /* [66, 70] */ double u21jm1; /* [66, 70] */ double u31jm1; /* [66, 70] */ double u41jm1; /* [66, 70] */ double u51jm1; /* [66, 70] */ double u21km1; /* [66, 70] */ double u31km1; /* [66, 70] */ double u41km1; /* [66, 70] */ double u51km1; /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [66, 70] */ /* [66, 70] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /* [66, 70] */ // /* [66, 70] */ /* [67, 71] */ L1 = 0; /* [67, 71] */ L2 = nx - 1; /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [67, 71] */ /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [67, 71] */ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [67, 71] */ q = 0.50 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /* [67, 71] */ // /* [67, 71] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [68, 72] */ L2 = nx - 1; /* [68, 72] */ /* 
[68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [68, 72] */ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [68, 72] */ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [68, 72] */ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [68, 72] */ u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [68, 72] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /* [68, 72] */ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1]; /* [68, 72] */ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; /* [68, 72] */ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3]; /* [68, 72] */ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = 
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 
1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [68, 72] */ rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /* [68, 72] */ ist1 = 3; /* [68, 72] */ iend1 = nx - 4; /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; 
m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [68, 72] */ rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /* [68, 72] */ // /* [68, 72] */ /* [69, 73] */ L1 = 0; /* [69, 73] */ L2 = ny - 1; /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [69, 73] */ /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [69, 73] */ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [69, 73] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [69, 73] */ 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /* [69, 73] */ // /* [69, 73] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /* [70, 74] */ L2 = ny - 1; /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [70, 74] */ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [70, 74] */ u31j = tmp_imopVarPre83 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [70, 74] */ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [70, 74] */ u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [70, 74] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /* [70, 74] */ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /* [70, 74] */ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /* [70, 74] */ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /* [70, 74] */ u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 
1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = 
rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /* [70, 74] */ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /* [70, 74] */ jst1 = 3; /* [70, 74] */ jend1 = ny - 4; /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * 
u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /* [70, 74] */ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /* [70, 74] */ // /* [70, 74] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [71, 75] */ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [71, 75] */ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [71, 75] */ u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [71, 75] */ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [71, 75] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /* [71, 75] */ u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /* [71, 75] */ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /* [71, 75] */ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /* [71, 75] */ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /* [71, 75] */ 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ /* 
[71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /* [71, 75] */ // /* [71, 75] */ /* [72, 76] */ // /* [72, 76] */ /* [73, 77] */ /* [73, 77] */ /* [73, 77] */ _imopVarPre372 = (istep % inorm == 0); /* [73, 77] */ /* [73, 77] */ if (!_imopVarPre372) { /* [73, 77] */ /* [73, 77] */ _imopVarPre372 = (istep == itmax); } /* [73, 77] */ // /* [73, 77] */ /* [74] */ /* [74] */ if (_imopVarPre372) { /* [74] */ /* [74] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [74] */ double *sum; /* 
[74] */ v = rsd; /* [74] */ sum = rsdnm; /* [74] */ int i_imopVarPre93; /* [74] */ int j_imopVarPre94; /* [74] */ int k_imopVarPre95; /* [74] */ int m_imopVarPre96; /* [74] */ double sum0 = 0.0; /* [74] */ double sum1 = 0.0; /* [74] */ double sum2 = 0.0; /* [74] */ double sum3 = 0.0; /* [74] */ double sum4 = 0.0; /* [74] */ /* [74] */ /* [74] */ /* [74] */ /* [74] */ /* [74] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [74] */ /* [74] */ sum[m_imopVarPre96] = 0.0; } /* [74] */ // /* [74] */ /* [75] */ /* [75] */ /* [75] */ /* [75] */ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /* [75] */ /* [75] */ /* [75] */ /* [75] */ /* [75] */ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /* [75] */ /* [75] */ /* [75] */ /* [75] */ /* [75] */ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /* [75] */ /* [75] */ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /* [75] */ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /* [75] */ sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /* [75] */ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /* [75] */ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /* [75] */ // /* [75] */ /* [75] */ /* [75] */ sum[0] += sum0; /* [75] */ sum[1] += sum1; /* [75] */ sum[2] += sum2; /* [75] */ sum[3] += sum3; /* [75] */ sum[4] += sum4; /* [75] */ // /* [75] */ // /* [75] */ /* [76] */ /* [76] */ /* [76] */ /* [76] */ /* [76] */ /* [76] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [76] */ /* [76] */ double _imopVarPre154; /* [76] */ double _imopVarPre155; /* 
[76] */ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [76] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [76] */ /* [76] */ sum[m_imopVarPre96] = _imopVarPre155; } } /* [74, 76] */ // /* [74, 76] */ /* [75, 77] */ /* [75, 77] */ /* [75, 77] */ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre377) { /* [75, 77] */ /* [75, 77] */ _imopVarPre378 = (rsdnm[1] < tolrsd[1]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre378) { /* [75, 77] */ /* [75, 77] */ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre379) { /* [75, 77] */ /* [75, 77] */ _imopVarPre380 = (rsdnm[3] < tolrsd[3]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre380) { /* [75, 77] */ /* [75, 77] */ _imopVarPre380 = (rsdnm[4] < tolrsd[4]); } /* [75, 77] */ _imopVarPre379 = _imopVarPre380; } /* [75, 77] */ _imopVarPre378 = _imopVarPre379; } /* [75, 77] */ _imopVarPre377 = _imopVarPre378; } /* [75, 77] */ /* [75, 77] */ if (_imopVarPre377) { /* [75, 77] */ /* [75, 77] */ exit(1); /* [75, 77] */ } } /* [] */ timer_stop(1); /* [] */ /* [] */ maxtime = timer_read(1); /* [] */ } /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified) { /* [] */ /* [] */ double xcrref[5]; /* [] */ double xceref[5]; /* [] */ double xciref; /* [] */ double xcrdif[5]; /* [] */ double xcedif[5]; /* [] */ double xcidif; /* [] */ double epsilon; /* [] */ double dtref; /* [] */ int m; /* [] */ epsilon = 1.0e-08; /* [] */ *class = 'U'; /* [] */ *verified = 1; /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ xcrref[m] = 1.0; /* [] */ xceref[m] = 1.0; } /* [] */ xciref = 1.0; /* [] */ int _imopVarPre384; /* [] */ int _imopVarPre385; /* [] */ int _imopVarPre386; /* [] */ _imopVarPre384 = nx0 == 12; /* [] */ /* [] */ if (_imopVarPre384) { /* [] */ /* [] */ _imopVarPre385 = ny0 == 12; /* [] */ /* [] */ if (_imopVarPre385) 
{ /* [] */ /* [] */ _imopVarPre386 = nz0 == 12; /* [] */ /* [] */ if (_imopVarPre386) { /* [] */ /* [] */ _imopVarPre386 = itmax == 50; } /* [] */ _imopVarPre385 = _imopVarPre386; } /* [] */ _imopVarPre384 = _imopVarPre385; } /* [] */ /* [] */ if (_imopVarPre384) { /* [] */ /* [] */ *class = 'S'; /* [] */ dtref = 5.0e-1; /* [] */ xcrref[0] = 1.6196343210976702e-02; /* [] */ xcrref[1] = 2.1976745164821318e-03; /* [] */ xcrref[2] = 1.5179927653399185e-03; /* [] */ xcrref[3] = 1.5029584435994323e-03; /* [] */ xcrref[4] = 3.4264073155896461e-02; /* [] */ xceref[0] = 6.4223319957960924e-04; /* [] */ xceref[1] = 8.4144342047347926e-05; /* [] */ xceref[2] = 5.8588269616485186e-05; /* [] */ xceref[3] = 5.8474222595157350e-05; /* [] */ xceref[4] = 1.3103347914111294e-03; /* [] */ xciref = 7.8418928865937083; } else { /* [] */ /* [] */ int _imopVarPre390; /* [] */ int _imopVarPre391; /* [] */ int _imopVarPre392; /* [] */ _imopVarPre390 = nx0 == 33; /* [] */ /* [] */ if (_imopVarPre390) { /* [] */ /* [] */ _imopVarPre391 = ny0 == 33; /* [] */ /* [] */ if (_imopVarPre391) { /* [] */ /* [] */ _imopVarPre392 = nz0 == 33; /* [] */ /* [] */ if (_imopVarPre392) { /* [] */ /* [] */ _imopVarPre392 = itmax == 300; } /* [] */ _imopVarPre391 = _imopVarPre392; } /* [] */ _imopVarPre390 = _imopVarPre391; } /* [] */ /* [] */ if (_imopVarPre390) { /* [] */ /* [] */ *class = 'W'; /* [] */ dtref = 1.5e-3; /* [] */ xcrref[0] = 0.1236511638192e+02; /* [] */ xcrref[1] = 0.1317228477799e+01; /* [] */ xcrref[2] = 0.2550120713095e+01; /* [] */ xcrref[3] = 0.2326187750252e+01; /* [] */ xcrref[4] = 0.2826799444189e+02; /* [] */ xceref[0] = 0.4867877144216; /* [] */ xceref[1] = 0.5064652880982e-01; /* [] */ xceref[2] = 0.9281818101960e-01; /* [] */ xceref[3] = 0.8570126542733e-01; /* [] */ xceref[4] = 0.1084277417792e+01; /* [] */ xciref = 0.1161399311023e+02; } else { /* [] */ /* [] */ int _imopVarPre396; /* [] */ int _imopVarPre397; /* [] */ int _imopVarPre398; /* [] */ _imopVarPre396 = nx0 == 64; 
/* [] */ /* [] */ if (_imopVarPre396) { /* [] */ /* [] */ _imopVarPre397 = ny0 == 64; /* [] */ /* [] */ if (_imopVarPre397) { /* [] */ /* [] */ _imopVarPre398 = nz0 == 64; /* [] */ /* [] */ if (_imopVarPre398) { /* [] */ /* [] */ _imopVarPre398 = itmax == 250; } /* [] */ _imopVarPre397 = _imopVarPre398; } /* [] */ _imopVarPre396 = _imopVarPre397; } /* [] */ /* [] */ if (_imopVarPre396) { /* [] */ /* [] */ *class = 'A'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 7.7902107606689367e+02; /* [] */ xcrref[1] = 6.3402765259692870e+01; /* [] */ xcrref[2] = 1.9499249727292479e+02; /* [] */ xcrref[3] = 1.7845301160418537e+02; /* [] */ xcrref[4] = 1.8384760349464247e+03; /* [] */ xceref[0] = 2.9964085685471943e+01; /* [] */ xceref[1] = 2.8194576365003349; /* [] */ xceref[2] = 7.3473412698774742; /* [] */ xceref[3] = 6.7139225687777051; /* [] */ xceref[4] = 7.0715315688392578e+01; /* [] */ xciref = 2.6030925604886277e+01; } else { /* [] */ /* [] */ int _imopVarPre402; /* [] */ int _imopVarPre403; /* [] */ int _imopVarPre404; /* [] */ _imopVarPre402 = nx0 == 102; /* [] */ /* [] */ if (_imopVarPre402) { /* [] */ /* [] */ _imopVarPre403 = ny0 == 102; /* [] */ /* [] */ if (_imopVarPre403) { /* [] */ /* [] */ _imopVarPre404 = nz0 == 102; /* [] */ /* [] */ if (_imopVarPre404) { /* [] */ /* [] */ _imopVarPre404 = itmax == 250; } /* [] */ _imopVarPre403 = _imopVarPre404; } /* [] */ _imopVarPre402 = _imopVarPre403; } /* [] */ /* [] */ if (_imopVarPre402) { /* [] */ /* [] */ *class = 'B'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 3.5532672969982736e+03; /* [] */ xcrref[1] = 2.6214750795310692e+02; /* [] */ xcrref[2] = 8.8333721850952190e+02; /* [] */ xcrref[3] = 7.7812774739425265e+02; /* [] */ xcrref[4] = 7.3087969592545314e+03; /* [] */ xceref[0] = 1.1401176380212709e+02; /* [] */ xceref[1] = 8.1098963655421574; /* [] */ xceref[2] = 2.8480597317698308e+01; /* [] */ xceref[3] = 2.5905394567832939e+01; /* [] */ xceref[4] = 2.6054907504857413e+02; /* [] */ xciref = 
4.7887162703308227e+01; } else { /* [] */ /* [] */ int _imopVarPre408; /* [] */ int _imopVarPre409; /* [] */ int _imopVarPre410; /* [] */ _imopVarPre408 = nx0 == 162; /* [] */ /* [] */ if (_imopVarPre408) { /* [] */ /* [] */ _imopVarPre409 = ny0 == 162; /* [] */ /* [] */ if (_imopVarPre409) { /* [] */ /* [] */ _imopVarPre410 = nz0 == 162; /* [] */ /* [] */ if (_imopVarPre410) { /* [] */ /* [] */ _imopVarPre410 = itmax == 250; } /* [] */ _imopVarPre409 = _imopVarPre410; } /* [] */ _imopVarPre408 = _imopVarPre409; } /* [] */ /* [] */ if (_imopVarPre408) { /* [] */ /* [] */ *class = 'C'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 1.03766980323537846e+04; /* [] */ xcrref[1] = 8.92212458801008552e+02; /* [] */ xcrref[2] = 2.56238814582660871e+03; /* [] */ xcrref[3] = 2.19194343857831427e+03; /* [] */ xcrref[4] = 1.78078057261061185e+04; /* [] */ xceref[0] = 2.15986399716949279e+02; /* [] */ xceref[1] = 1.55789559239863600e+01; /* [] */ xceref[2] = 5.41318863077207766e+01; /* [] */ xceref[3] = 4.82262643154045421e+01; /* [] */ xceref[4] = 4.55902910043250358e+02; /* [] */ xciref = 6.66404553572181300e+01; } else { /* [] */ /* [] */ *verified = 0; } } } } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ double _imopVarPre412; /* [] */ double _imopVarPre413; /* [] */ _imopVarPre412 = (xcr[m] - xcrref[m]) / xcrref[m]; /* [] */ _imopVarPre413 = fabs(_imopVarPre412); /* [] */ /* [] */ xcrdif[m] = _imopVarPre413; /* [] */ double _imopVarPre415; /* [] */ double _imopVarPre416; /* [] */ _imopVarPre415 = (xce[m] - xceref[m]) / xceref[m]; /* [] */ _imopVarPre416 = fabs(_imopVarPre415); /* [] */ /* [] */ xcedif[m] = _imopVarPre416; } /* [] */ double _imopVarPre418; /* [] */ double _imopVarPre419; /* [] */ _imopVarPre418 = (xci - xciref) / xciref; /* [] */ _imopVarPre419 = fabs(_imopVarPre418); /* [] */ /* [] */ xcidif = _imopVarPre419; /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ char _imopVarPre421; /* [] */ _imopVarPre421 = *class; 
/* [] */ printf("\n Verification being performed for class %1c\n", _imopVarPre421); /* [] */ /* [] */ printf(" Accuracy setting for epsilon = %20.13e\n", epsilon); /* [] */ /* [] */ double _imopVarPre424; /* [] */ double _imopVarPre425; /* [] */ _imopVarPre424 = dt - dtref; /* [] */ _imopVarPre425 = fabs(_imopVarPre424); /* [] */ /* [] */ /* [] */ if (_imopVarPre425 > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ *class = 'U'; /* [] */ printf(" DT does not match the reference value of %15.8e\n", dtref); /* [] */ } } else { /* [] */ /* [] */ printf(" Unknown class\n"); /* [] */ } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of RMS-norms of residual\n"); /* [] */ } else { /* [] */ /* [] */ printf(" RMS-norms of residual\n"); /* [] */ } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ double _imopVarPre427; /* [] */ _imopVarPre427 = xcr[m]; /* [] */ printf(" %2d %20.13e\n", m, _imopVarPre427); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcrdif[m] > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ double _imopVarPre431; /* [] */ double _imopVarPre432; /* [] */ double _imopVarPre433; /* [] */ _imopVarPre431 = xcrdif[m]; /* [] */ _imopVarPre432 = xcrref[m]; /* [] */ _imopVarPre433 = xcr[m]; /* [] */ printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre433, _imopVarPre432, _imopVarPre431); /* [] */ } else { /* [] */ /* [] */ double _imopVarPre437; /* [] */ double _imopVarPre438; /* [] */ double _imopVarPre439; /* [] */ _imopVarPre437 = xcrdif[m]; /* [] */ _imopVarPre438 = xcrref[m]; /* [] */ _imopVarPre439 = xcr[m]; /* [] */ printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre439, _imopVarPre438, _imopVarPre437); /* [] */ } } } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of RMS-norms of solution error\n"); /* [] */ } else { /* [] */ /* [] */ printf(" RMS-norms of solution error\n"); /* [] */ } /* [] */ /* 
[] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ double _imopVarPre441; /* [] */ _imopVarPre441 = xce[m]; /* [] */ printf(" %2d %20.13e\n", m, _imopVarPre441); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcedif[m] > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ double _imopVarPre445; /* [] */ double _imopVarPre446; /* [] */ double _imopVarPre447; /* [] */ _imopVarPre445 = xcedif[m]; /* [] */ _imopVarPre446 = xceref[m]; /* [] */ _imopVarPre447 = xce[m]; /* [] */ printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre447, _imopVarPre446, _imopVarPre445); /* [] */ } else { /* [] */ /* [] */ double _imopVarPre451; /* [] */ double _imopVarPre452; /* [] */ double _imopVarPre453; /* [] */ _imopVarPre451 = xcedif[m]; /* [] */ _imopVarPre452 = xceref[m]; /* [] */ _imopVarPre453 = xce[m]; /* [] */ printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre453, _imopVarPre452, _imopVarPre451); /* [] */ } } } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of surface integral\n"); /* [] */ } else { /* [] */ /* [] */ printf(" Surface integral\n"); /* [] */ } /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ printf(" %20.13e\n", xci); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcidif > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); /* [] */ } else { /* [] */ /* [] */ printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); /* [] */ } } /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ printf(" No reference values provided\n"); /* [] */ /* [] */ printf(" No verification performed\n"); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (*verified) { /* [] */ /* [] */ printf(" Verification Successful\n"); /* [] */ } else { /* [] */ /* [] */ printf(" Verification failed\n"); /* [] */ } } }
/* [] */ typedef long long __int64_t; /* [] */ typedef __int64_t __darwin_off_t; /* [] */ typedef __darwin_off_t fpos_t; /* [] */ struct __sbuf { unsigned char *_base; int _size; }; /* [] */ struct __sFILEX; /* [] */ struct __sFILE { unsigned char *_p; int _r; int _w; short _flags; short _file; struct __sbuf _bf; int _lbfsize; void *_cookie; int (*_close) (void *); int (*_read) (void *, char *, int); fpos_t(*_seek) (void *, fpos_t, int); int (*_write) (void *, const char *, int); struct __sbuf _ub; struct __sFILEX *_extra; int _ur; unsigned char _ubuf[3]; unsigned char _nbuf[1]; struct __sbuf _lb; int _blksize; fpos_t _offset; }; /* [] */ typedef struct __sFILE FILE; /* [] */ int fclose(FILE *); /* [] */ int fgetc(FILE *); /* [] */ FILE *fopen(const char *restrict __filename, const char *restrict __mode); /* [] */ int fscanf(FILE * restrict, const char *restrict,...); /* [] */ int printf(const char *restrict,...); /* [] */ void exit(int); /* [] */ extern double fabs(double); /* [] */ extern double sqrt(double); /* [] */ extern int omp_get_num_threads(void); /* [] */ typedef int boolean; /* [] */ extern void timer_clear(int); /* [] */ extern void timer_start(int); /* [] */ extern void timer_stop(int); /* [] */ extern double timer_read(int); /* [] */ extern void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* [] */ static int nx; /* [] */ static int ny; /* [] */ static int nz; /* [] */ static int nx0; /* [] */ static int ny0; /* [] */ static int nz0; /* [] */ static int ist; /* [] */ static int iend; /* [] */ static int jst; /* [] */ static int jend; /* [] */ static int ii1; /* [] */ static int ii2; /* [] */ static int ji1; /* [] */ static int ji2; /* [] */ static int ki1; /* [] */ static int ki2; /* [] */ static double dxi; /* [] */ 
/* Remaining grid spacings (dxi on the previous line, deta, dzeta). */
static double deta;
static double dzeta;
/* Difference-scheme coefficients per coordinate direction (set in setcoeff). */
static double tx1;
static double tx2;
static double tx3;
static double ty1;
static double ty2;
static double ty3;
static double tz1;
static double tz2;
static double tz3;
/* Artificial-dissipation coefficients, one per equation (5) per direction. */
static double dx1;
static double dx2;
static double dx3;
static double dx4;
static double dx5;
static double dy1;
static double dy2;
static double dy3;
static double dy4;
static double dy5;
static double dz1;
static double dz2;
static double dz3;
static double dz4;
static double dz5;
static double dssp; /* fourth-order dissipation scalar */
/* Main field arrays: 5 conserved variables per grid point, problem size 12.
 * Note 12 / 2 * 2 + 1 == 13: an odd middle dimension — presumably padding to
 * avoid cache/bank conflicts (TODO confirm against the NPB sources).
 *   u    — current solution
 *   rsd  — residual / SSOR right-hand side
 *   frct — forcing (exact-solution) terms
 *   flux — per-direction flux work array */
static double u[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double rsd[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double frct[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double flux[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/* Iteration control and tolerances. */
static int ipr;
static int inorm;
static int itmax;
static double dt;
static double omega; /* SSOR relaxation parameter */
static double tolrsd[5]; /* residual tolerances, one per equation */
static double rsdnm[5]; /* residual norms */
static double errnm[5]; /* error norms vs. exact solution */
static double frc; /* surface-integral result */
/* 5x5 block-Jacobian matrices at each (i,j) of a k-plane (filled by
 * jacld/jacu, consumed by blts/buts). */
static double a[12][12][5][5];
static double b[12][12][5][5];
static double c[12][12][5][5];
static double d[12][12][5][5];
static double ce[5][13]; /* exact-solution polynomial coefficients */
static double maxtime;
static boolean flag[12 / 2 * 2 + 1]; /* per-plane pipeline flags */
/* Lower-triangular (forward) sweep of the SSOR iteration for k-plane k —
 * name suggests "block lower-triangular solve"; body not visible in this
 * chunk. */
static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0);
/* Upper-triangular (backward) sweep counterpart of blts; this declaration
 * continues on the following source line. */
static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5],
double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0); /* [] */ static void domain(void); /* [] */ static void erhs(void); /* [] */ static void error(void); /* [] */ static void exact(int i, int j, int k, double u000ijk[5]); /* [] */ static void jacld(int k); /* [] */ static void jacu(int k); /* [] */ static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double sum[5]); /* [] */ static void pintgr(void); /* [] */ static void read_input(void); /* [] */ static void rhs(void); /* [] */ static void setbv(void); /* [] */ static void setcoeff(void); /* [] */ static void setiv(void); /* [] */ static void ssor(void); /* [] */ static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified); /* [] */ /* [] */ /* [] */ int main(int argc, char **argv) { /* [] */ /* [] */ char class; /* [] */ boolean verified; /* [] */ double mflops; /* [] */ int nthreads = 1; /* [] */ read_input(); /* [] */ /* [] */ domain(); /* [] */ /* [] */ setcoeff(); /* [] */ /* [1] */ #pragma omp parallel { /* [1] */ /* [1] */ int i; /* [1] */ int j; /* [1] */ int k; /* [1] */ int iglob; /* [1] */ int jglob; /* [1] */ #pragma omp for nowait /* [1] */ /* [1] */ /* [1] */ for (i = 0; i < nx; i++) { /* [1] */ /* [1] */ iglob = i; /* [1] */ /* [1] */ /* [1] */ /* [1] */ for (j = 0; j < ny; j++) { /* [1] */ /* [1] */ jglob = j; /* [1] */ double *_imopVarPre239; /* [1] */ _imopVarPre239 = &u[i][j][0][0]; /* [1] */ exact(iglob, jglob, 0, _imopVarPre239); /* [1] */ /* [1] */ double *_imopVarPre242; /* [1] */ int _imopVarPre243; /* [1] */ _imopVarPre242 = &u[i][j][nz - 1][0]; /* [1] */ _imopVarPre243 = nz - 1; /* [1] */ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /* [1] */ } } /* [1] */ // #pragma omp dummyFlush BARRIER_START /* [1] */ #pragma omp barrier /* [2] */ #pragma omp for nowait /* [2] */ /* [2] */ /* [2] */ 
for (i = 0; i < nx; i++) { /* [2] */ /* [2] */ iglob = i; /* [2] */ /* [2] */ /* [2] */ /* [2] */ for (k = 0; k < nz; k++) { /* [2] */ /* [2] */ double *_imopVarPre245; /* [2] */ _imopVarPre245 = &u[i][0][k][0]; /* [2] */ exact(iglob, 0, k, _imopVarPre245); /* [2] */ } } /* [2] */ // #pragma omp dummyFlush BARRIER_START /* [2] */ #pragma omp barrier /* [3] */ #pragma omp for nowait /* [3] */ /* [3] */ /* [3] */ for (i = 0; i < nx; i++) { /* [3] */ /* [3] */ iglob = i; /* [3] */ /* [3] */ /* [3] */ /* [3] */ for (k = 0; k < nz; k++) { /* [3] */ /* [3] */ double *_imopVarPre248; /* [3] */ int _imopVarPre249; /* [3] */ _imopVarPre248 = &u[i][ny - 1][k][0]; /* [3] */ _imopVarPre249 = ny0 - 1; /* [3] */ exact(iglob, _imopVarPre249, k, _imopVarPre248); /* [3] */ } } /* [3] */ // #pragma omp dummyFlush BARRIER_START /* [3] */ #pragma omp barrier /* [4] */ #pragma omp for nowait /* [4] */ /* [4] */ /* [4] */ for (j = 0; j < ny; j++) { /* [4] */ /* [4] */ jglob = j; /* [4] */ /* [4] */ /* [4] */ /* [4] */ for (k = 0; k < nz; k++) { /* [4] */ /* [4] */ double *_imopVarPre251; /* [4] */ _imopVarPre251 = &u[0][j][k][0]; /* [4] */ exact(0, jglob, k, _imopVarPre251); /* [4] */ } } /* [4] */ // #pragma omp dummyFlush BARRIER_START /* [4] */ #pragma omp barrier /* [5] */ #pragma omp for nowait /* [5] */ /* [5] */ /* [5] */ for (j = 0; j < ny; j++) { /* [5] */ /* [5] */ jglob = j; /* [5] */ /* [5] */ /* [5] */ /* [5] */ for (k = 0; k < nz; k++) { /* [5] */ /* [5] */ double *_imopVarPre254; /* [5] */ int _imopVarPre255; /* [5] */ _imopVarPre254 = &u[nx - 1][j][k][0]; /* [5] */ _imopVarPre255 = nx0 - 1; /* [5] */ exact(_imopVarPre255, jglob, k, _imopVarPre254); /* [5] */ } } } /* [6] */ #pragma omp parallel { /* [6] */ /* [6] */ int i; /* [6] */ int j; /* [6] */ int k; /* [6] */ int m; /* [6] */ int iglob; /* [6] */ int jglob; /* [6] */ double xi; /* [6] */ double eta; /* [6] */ double zeta; /* [6] */ double pxi; /* [6] */ double peta; /* [6] */ double pzeta; /* [6] */ double 
ue_1jk[5]; /* [6] */ double ue_nx0jk[5]; /* [6] */ double ue_i1k[5]; /* [6] */ double ue_iny0k[5]; /* [6] */ double ue_ij1[5]; /* [6] */ double ue_ijnz[5]; /* [6] */ #pragma omp for nowait /* [6] */ /* [6] */ /* [6] */ for (j = 0; j < ny; j++) { /* [6] */ /* [6] */ jglob = j; /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (k = 1; k < nz - 1; k++) { /* [6] */ /* [6] */ zeta = ((double)k) / (nz - 1); /* [6] */ int _imopVarPre361; /* [6] */ _imopVarPre361 = jglob != 0; /* [6] */ /* [6] */ if (_imopVarPre361) { /* [6] */ /* [6] */ _imopVarPre361 = jglob != ny0 - 1; } /* [6] */ /* [6] */ if (_imopVarPre361) { /* [6] */ /* [6] */ eta = ((double)jglob) / (ny0 - 1); /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (i = 0; i < nx; i++) { /* [6] */ /* [6] */ iglob = i; /* [6] */ int _imopVarPre363; /* [6] */ _imopVarPre363 = iglob != 0; /* [6] */ /* [6] */ if (_imopVarPre363) { /* [6] */ /* [6] */ _imopVarPre363 = iglob != nx0 - 1; } /* [6] */ /* [6] */ if (_imopVarPre363) { /* [6] */ /* [6] */ xi = ((double)iglob) / (nx0 - 1); /* [6] */ exact(0, jglob, k, ue_1jk); /* [6] */ /* [6] */ int _imopVarPre365; /* [6] */ _imopVarPre365 = nx0 - 1; /* [6] */ exact(_imopVarPre365, jglob, k, ue_nx0jk); /* [6] */ /* [6] */ exact(iglob, 0, k, ue_i1k); /* [6] */ /* [6] */ int _imopVarPre367; /* [6] */ _imopVarPre367 = ny0 - 1; /* [6] */ exact(iglob, _imopVarPre367, k, ue_iny0k); /* [6] */ /* [6] */ exact(iglob, jglob, 0, ue_ij1); /* [6] */ /* [6] */ int _imopVarPre369; /* [6] */ _imopVarPre369 = nz - 1; /* [6] */ exact(iglob, jglob, _imopVarPre369, ue_ijnz); /* [6] */ /* [6] */ /* [6] */ /* [6] */ /* [6] */ for (m = 0; m < 5; m++) { /* [6] */ /* [6] */ pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; /* [6] */ peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; /* [6] */ pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; /* [6] */ u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } } /* [6, 7] */ #pragma omp parallel { /* [6, 7] */ /* 
[6, 7] */ int i; /* [6, 7] */ int j; /* [6, 7] */ int k; /* [6, 7] */ int m; /* [6, 7] */ int iglob; /* [6, 7] */ int jglob; /* [6, 7] */ int L1; /* [6, 7] */ int L2; /* [6, 7] */ int ist1; /* [6, 7] */ int iend1; /* [6, 7] */ int jst1; /* [6, 7] */ int jend1; /* [6, 7] */ double dsspm; /* [6, 7] */ double xi; /* [6, 7] */ double eta; /* [6, 7] */ double zeta; /* [6, 7] */ double q; /* [6, 7] */ double u21; /* [6, 7] */ double u31; /* [6, 7] */ double u41; /* [6, 7] */ double tmp; /* [6, 7] */ double u21i; /* [6, 7] */ double u31i; /* [6, 7] */ double u41i; /* [6, 7] */ double u51i; /* [6, 7] */ double u21j; /* [6, 7] */ double u31j; /* [6, 7] */ double u41j; /* [6, 7] */ double u51j; /* [6, 7] */ double u21k; /* [6, 7] */ double u31k; /* [6, 7] */ double u41k; /* [6, 7] */ double u51k; /* [6, 7] */ double u21im1; /* [6, 7] */ double u31im1; /* [6, 7] */ double u41im1; /* [6, 7] */ double u51im1; /* [6, 7] */ double u21jm1; /* [6, 7] */ double u31jm1; /* [6, 7] */ double u41jm1; /* [6, 7] */ double u51jm1; /* [6, 7] */ double u21km1; /* [6, 7] */ double u31km1; /* [6, 7] */ double u41km1; /* [6, 7] */ double u51km1; /* [6, 7] */ dsspm = dssp; /* [6, 7] */ #pragma omp for nowait /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (i = 0; i < nx; i++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (j = 0; j < ny; j++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (k = 0; k < nz; k++) { /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (m = 0; m < 5; m++) { /* [6, 7] */ /* [6, 7] */ frct[i][j][k][m] = 0.0; } } } } /* [6, 7] */ #pragma omp for nowait /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (i = 0; i < nx; i++) { /* [6, 7] */ /* [6, 7] */ iglob = i; /* [6, 7] */ xi = ((double)iglob) / (nx0 - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (j = 0; j < ny; j++) { /* [6, 7] */ /* [6, 7] */ jglob = j; /* [6, 7] */ eta = ((double)jglob) / (ny0 - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ 
for (k = 0; k < nz; k++) { /* [6, 7] */ /* [6, 7] */ zeta = ((double)k) / (nz - 1); /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ /* [6, 7] */ for (m = 0; m < 5; m++) { /* [6, 7] */ /* [6, 7] */ rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /* [6, 7] */ // #pragma omp dummyFlush BARRIER_START /* [6, 7] */ #pragma omp barrier /* [6, 8] */ L1 = 0; /* [6, 8] */ L2 = nx - 1; /* [6, 8] */ #pragma omp for nowait /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (i = L1; i <= L2; i++) { /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (j = jst; j <= jend; j++) { /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ /* [6, 8] */ for (k = 1; k < nz - 1; k++) { /* [6, 8] */ /* [6, 8] */ flux[i][j][k][0] = rsd[i][j][k][1]; /* [6, 8] */ u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; /* [6, 8] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 8] */ flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 8] */ flux[i][j][k][2] = rsd[i][j][k][2] * u21; /* [6, 8] */ flux[i][j][k][3] = rsd[i][j][k][3] * u21; /* [6, 8] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [6, 8] */ // #pragma omp dummyFlush BARRIER_START /* [6, 8] */ #pragma omp barrier /* [6, 9] */ #pragma omp for nowait /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (j = jst; j <= jend; j++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (k = 1; k <= nz - 2; k++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= iend; i++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; 
m++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= L2; i++) { /* [6, 9] */ /* [6, 9] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 9] */ u21i = tmp * rsd[i][j][k][1]; /* [6, 9] */ u31i = tmp * rsd[i][j][k][2]; /* [6, 9] */ u41i = tmp * rsd[i][j][k][3]; /* [6, 9] */ u51i = tmp * rsd[i][j][k][4]; /* [6, 9] */ tmp = 1.0 / rsd[i - 1][j][k][0]; /* [6, 9] */ u21im1 = tmp * rsd[i - 1][j][k][1]; /* [6, 9] */ u31im1 = tmp * rsd[i - 1][j][k][2]; /* [6, 9] */ u41im1 = tmp * rsd[i - 1][j][k][3]; /* [6, 9] */ u51im1 = tmp * rsd[i - 1][j][k][4]; /* [6, 9] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [6, 9] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [6, 9] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [6, 9] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist; i <= iend; i++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); /* [6, 9] */ frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); /* [6, 9] */ frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); /* [6, 9] */ frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); /* [6, 9] */ 
frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); /* [6, 9] */ frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } /* [6, 9] */ ist1 = 3; /* [6, 9] */ iend1 = nx - 4; /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (i = ist1; i <= iend1; i++) { /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ /* [6, 9] */ for (m = 0; m < 5; m++) { /* [6, 9] */ /* [6, 9] */ frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); /* [6, 9] */ frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /* [6, 9] */ // #pragma omp dummyFlush BARRIER_START /* [6, 9] */ #pragma omp barrier /* [6, 10] */ L1 = 0; /* [6, 10] */ L2 = ny - 1; /* [6, 10] */ #pragma omp for nowait /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (i = ist; i <= iend; i++) { /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (j = L1; j <= L2; j++) { /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ /* [6, 10] */ for (k = 1; k <= nz - 2; k++) { /* [6, 10] */ /* [6, 10] */ flux[i][j][k][0] = rsd[i][j][k][2]; /* [6, 10] */ u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; /* [6, 10] 
*/ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 10] */ flux[i][j][k][1] = rsd[i][j][k][1] * u31; /* [6, 10] */ flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 10] */ flux[i][j][k][3] = rsd[i][j][k][3] * u31; /* [6, 10] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [6, 10] */ // #pragma omp dummyFlush BARRIER_START /* [6, 10] */ #pragma omp barrier /* [6, 11] */ #pragma omp for nowait /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (i = ist; i <= iend; i++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (k = 1; k <= nz - 2; k++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; j <= jend; j++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; j <= L2; j++) { /* [6, 11] */ /* [6, 11] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 11] */ u21j = tmp * rsd[i][j][k][1]; /* [6, 11] */ u31j = tmp * rsd[i][j][k][2]; /* [6, 11] */ u41j = tmp * rsd[i][j][k][3]; /* [6, 11] */ u51j = tmp * rsd[i][j][k][4]; /* [6, 11] */ tmp = 1.0 / rsd[i][j - 1][k][0]; /* [6, 11] */ u21jm1 = tmp * rsd[i][j - 1][k][1]; /* [6, 11] */ u31jm1 = tmp * rsd[i][j - 1][k][2]; /* [6, 11] */ u41jm1 = tmp * rsd[i][j - 1][k][3]; /* [6, 11] */ u51jm1 = tmp * rsd[i][j - 1][k][4]; /* [6, 11] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [6, 11] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [6, 11] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [6, 11] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 
* (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst; j <= jend; j++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); /* [6, 11] */ frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); /* [6, 11] */ frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); /* [6, 11] */ frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); /* [6, 11] */ frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); /* [6, 11] */ frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } /* [6, 11] */ jst1 = 3; /* [6, 11] */ jend1 = ny - 4; /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (j = jst1; j <= jend1; j++) { /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } /* [6, 11] */ /* [6, 11] */ /* [6, 11] 
*/ /* [6, 11] */ for (m = 0; m < 5; m++) { /* [6, 11] */ /* [6, 11] */ frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); /* [6, 11] */ frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /* [6, 11] */ // #pragma omp dummyFlush BARRIER_START /* [6, 11] */ #pragma omp barrier /* [6, 12] */ #pragma omp for nowait /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (i = ist; i <= iend; i++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (j = jst; j <= jend; j++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 0; k <= nz - 1; k++) { /* [6, 12] */ /* [6, 12] */ flux[i][j][k][0] = rsd[i][j][k][3]; /* [6, 12] */ u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; /* [6, 12] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [6, 12] */ flux[i][j][k][1] = rsd[i][j][k][1] * u41; /* [6, 12] */ flux[i][j][k][2] = rsd[i][j][k][2] * u41; /* [6, 12] */ flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [6, 12] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 2; k++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 1; k++) { /* [6, 12] */ /* [6, 12] */ tmp = 1.0 / rsd[i][j][k][0]; /* [6, 12] */ u21k = tmp * rsd[i][j][k][1]; /* [6, 12] */ u31k = tmp * rsd[i][j][k][2]; /* [6, 12] */ u41k = tmp * rsd[i][j][k][3]; /* [6, 12] */ u51k = tmp * rsd[i][j][k][4]; /* [6, 12] 
*/ tmp = 1.0 / rsd[i][j][k - 1][0]; /* [6, 12] */ u21km1 = tmp * rsd[i][j][k - 1][1]; /* [6, 12] */ u31km1 = tmp * rsd[i][j][k - 1][2]; /* [6, 12] */ u41km1 = tmp * rsd[i][j][k - 1][3]; /* [6, 12] */ u51km1 = tmp * rsd[i][j][k - 1][4]; /* [6, 12] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [6, 12] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [6, 12] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [6, 12] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 1; k <= nz - 2; k++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); /* [6, 12] */ frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); /* [6, 12] */ frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); /* [6, 12] */ frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); /* [6, 12] */ frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); /* [6, 12] */ 
frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (k = 3; k <= nz - 4; k++) { /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ /* [6, 12] */ for (m = 0; m < 5; m++) { /* [6, 12] */ /* [6, 12] */ frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); /* [6, 12] */ frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } /* [13] */ #pragma omp parallel { /* [13] */ /* [13] */ #pragma omp master { /* [13] */ /* [13] */ nthreads = omp_get_num_threads(); /* [13] */ } } /* [13] */ int i; /* [13] */ int j; /* [13] */ int k; /* [13] */ int m; /* [13] */ int istep; /* [13] */ double tmp; /* [13] */ double delunm[5]; /* [13] */ double tv[12][12][5]; /* [13] */ tmp = 1.0 / (omega * (2.0 - omega)); /* [13, 14] */ #pragma omp parallel private(i, j, k, m) { /* [13, 14] */ /* [13, 14] */ #pragma omp for nowait /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (i = 0; i < 12; i++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (j = 0; j < 12; j++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (k = 0; k < 5; k++) { /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (m = 0; m < 5; m++) { /* [13, 14] */ /* [13, 14] */ a[i][j][k][m] = 0.0; /* [13, 14] */ b[i][j][k][m] = 0.0; /* [13, 14] */ c[i][j][k][m] = 0.0; /* [13, 14] */ d[i][j][k][m] = 0.0; } } } } } /* 
[13, 14, 15] */ #pragma omp parallel { /* [13, 14, 15] */ /* [13, 14, 15] */ int i_imopVarPre84; /* [13, 14, 15] */ int j_imopVarPre85; /* [13, 14, 15] */ int k_imopVarPre86; /* [13, 14, 15] */ int m_imopVarPre87; /* [13, 14, 15] */ int L1; /* [13, 14, 15] */ int L2; /* [13, 14, 15] */ int ist1; /* [13, 14, 15] */ int iend1; /* [13, 14, 15] */ int jst1; /* [13, 14, 15] */ int jend1; /* [13, 14, 15] */ double q; /* [13, 14, 15] */ double u21; /* [13, 14, 15] */ double u31; /* [13, 14, 15] */ double u41; /* [13, 14, 15] */ double tmp_imopVarPre88; /* [13, 14, 15] */ double u21i; /* [13, 14, 15] */ double u31i; /* [13, 14, 15] */ double u41i; /* [13, 14, 15] */ double u51i; /* [13, 14, 15] */ double u21j; /* [13, 14, 15] */ double u31j; /* [13, 14, 15] */ double u41j; /* [13, 14, 15] */ double u51j; /* [13, 14, 15] */ double u21k; /* [13, 14, 15] */ double u31k; /* [13, 14, 15] */ double u41k; /* [13, 14, 15] */ double u51k; /* [13, 14, 15] */ double u21im1; /* [13, 14, 15] */ double u31im1; /* [13, 14, 15] */ double u41im1; /* [13, 14, 15] */ double u51im1; /* [13, 14, 15] */ double u21jm1; /* [13, 14, 15] */ double u31jm1; /* [13, 14, 15] */ double u41jm1; /* [13, 14, 15] */ double u51jm1; /* [13, 14, 15] */ double u21km1; /* [13, 14, 15] */ double u31km1; /* [13, 14, 15] */ double u41km1; /* [13, 14, 15] */ double u51km1; /* [13, 14, 15] */ #pragma omp for nowait /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ /* [13, 14, 15] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; 
m_imopVarPre87++) { /* [13, 14, 15] */ /* [13, 14, 15] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /* [13, 14, 15] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 15] */ #pragma omp barrier /* [13, 14, 16] */ L1 = 0; /* [13, 14, 16] */ L2 = nx - 1; /* [13, 14, 16] */ #pragma omp for nowait /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ /* [13, 14, 16] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 16] */ /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 16] */ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 16] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /* [13, 14, 16] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /* [13, 14, 16] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 16] */ #pragma omp barrier /* [13, 14, 17] */ #pragma omp for nowait /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 17] */ L2 = nx - 1; /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 17] */ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 17] */ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 17] */ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 17] */ u51i = tmp_imopVarPre88 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 17] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 17] */ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 17] */ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 17] */ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 17] */ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /* [13, 14, 17] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]); } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * 
u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 17] */ rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /* [13, 14, 17] */ ist1 = 3; /* [13, 14, 17] */ iend1 = nx - 4; /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ /* [13, 14, 17] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 17] */ /* [13, 14, 17] */ rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * 
u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 17] */ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /* [13, 14, 17] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 17] */ #pragma omp barrier /* [13, 14, 18] */ L1 = 0; /* [13, 14, 18] */ L2 = ny - 1; /* [13, 14, 18] */ #pragma omp for nowait /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ /* [13, 14, 18] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 18] */ /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 18] */ u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 18] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /* [13, 14, 18] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /* [13, 14, 18] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 18] */ #pragma omp barrier /* [13, 14, 19] */ #pragma omp for nowait /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 19] */ L2 = ny - 1; /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 19] */ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 19] */ u31j = tmp_imopVarPre88 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 19] */ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 19] */ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 19] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /* [13, 14, 19] */ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /* [13, 14, 19] */ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2]; /* [13, 14, 19] */ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /* [13, 14, 19] */ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /* [13, 14, 19] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]); /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]); } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 
19] */ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 19] */ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /* [13, 14, 19] */ jst1 = 3; /* [13, 14, 19] */ jend1 = ny - 4; /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ /* [13, 14, 19] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 19] */ /* [13, 14, 19] */ rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 
4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /* [13, 14, 19] */ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /* [13, 14, 19] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 19] */ #pragma omp barrier /* [13, 14, 20] */ #pragma omp for nowait /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 20] */ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /* [13, 14, 20] 
*/ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [13, 14, 20] */ u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [13, 14, 20] */ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [13, 14, 20] */ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [13, 14, 20] */ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [13, 14, 20] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /* [13, 14, 20] */ u21km1 = tmp_imopVarPre88 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /* [13, 14, 20] */ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /* [13, 14, 20] */ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /* [13, 14, 20] */ u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [13, 14, 20] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); /* [13, 
14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = 
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ /* [13, 14, 20] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [13, 14, 20] */ /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /* [13, 14, 20] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz 
- 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } } /* [13, 14, 21] */ #pragma omp parallel { /* [13, 14, 21] */ /* [13, 14, 21] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 21] */ double *sum; /* [13, 14, 21] */ v = rsd; /* [13, 14, 21] */ sum = rsdnm; /* [13, 14, 21] */ int i_imopVarPre75; /* [13, 14, 21] */ int j_imopVarPre76; /* [13, 14, 21] */ int k_imopVarPre77; /* [13, 14, 21] */ int m_imopVarPre78; /* [13, 14, 21] */ double sum0 = 0.0; /* [13, 14, 21] */ double sum1 = 0.0; /* [13, 14, 21] */ double sum2 = 0.0; /* [13, 14, 21] */ double sum3 = 0.0; /* [13, 14, 21] */ double sum4 = 0.0; /* [13, 14, 21] */ #pragma omp single nowait { /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ /* [13, 14, 21] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [13, 14, 21] */ /* [13, 14, 21] */ sum[m_imopVarPre78] = 0.0; } } /* [13, 14, 21] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 21] */ #pragma omp barrier /* [13, 14, 22] */ #pragma omp for nowait /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ /* [13, 14, 22] */ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /* [13, 14, 22] */ /* [13, 14, 22] */ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /* [13, 14, 22] */ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /* [13, 14, 22] */ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /* [13, 14, 
22] */ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /* [13, 14, 22] */ sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /* [13, 14, 22] */ // #pragma omp dummyFlush CRITICAL_START /* [13, 14, 22] */ #pragma omp critical { /* [13, 14, 22] */ /* [13, 14, 22] */ sum[0] += sum0; /* [13, 14, 22] */ sum[1] += sum1; /* [13, 14, 22] */ sum[2] += sum2; /* [13, 14, 22] */ sum[3] += sum3; /* [13, 14, 22] */ sum[4] += sum4; } /* [13, 14, 22] */ // #pragma omp dummyFlush CRITICAL_END /* [13, 14, 22] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 22] */ #pragma omp barrier /* [13, 14, 23] */ #pragma omp single nowait { /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ /* [13, 14, 23] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [13, 14, 23] */ /* [13, 14, 23] */ double _imopVarPre154; /* [13, 14, 23] */ double _imopVarPre155; /* [13, 14, 23] */ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 23] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 23] */ /* [13, 14, 23] */ sum[m_imopVarPre78] = _imopVarPre155; } } } /* [13, 14] */ timer_clear(1); /* [13, 14] */ /* [13, 14] */ timer_start(1); /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ /* [13, 14] */ for (istep = 1; istep <= itmax; istep++) { /* [13, 14] */ /* [13, 14] */ int _imopVarPre372; /* [13, 14] */ int _imopVarPre370; /* [13, 14] */ int _imopVarPre371; /* [13, 14] */ _imopVarPre370 = istep % 20 == 0; /* [13, 14] */ /* [13, 14] */ if (!_imopVarPre370) { /* [13, 14] */ /* [13, 14] */ _imopVarPre371 = istep == itmax; /* [13, 14] */ /* [13, 14] */ if (!_imopVarPre371) { /* [13, 14] */ /* [13, 14] */ _imopVarPre371 = istep == 1; } /* [13, 14] */ _imopVarPre370 = _imopVarPre371; } /* [13, 14] */ /* [13, 14] */ if (_imopVarPre370) { /* [13, 14] */ /* [13, 14] 
*/ #pragma omp master { /* [13, 14] */ /* [13, 14] */ printf(" Time step %4d\n", istep); /* [13, 14] */ } } /* [13, 14, 24] */ #pragma omp parallel private(istep, i, j, k, m) { /* [13, 14, 24] */ /* [13, 14, 24] */ int _imopVarPre377; /* [13, 14, 24] */ int _imopVarPre378; /* [13, 14, 24] */ int _imopVarPre379; /* [13, 14, 24] */ int _imopVarPre380; /* [13, 14, 24] */ #pragma omp for nowait /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (i = ist; i <= iend; i++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (j = jst; j <= jend; j++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ /* [13, 14, 24] */ for (m = 0; m < 5; m++) { /* [13, 14, 24] */ /* [13, 14, 24] */ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /* [13, 14, 24] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 24] */ #pragma omp barrier /* [13, 14, 25] */ /* [13, 14, 25] */ /* [13, 14, 25] */ /* [13, 14, 25] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 25] */ /* [13, 14, 25] */ jacld(k); /* [13, 14, 25] */ /* [13, 14, 25] */ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /* [13, 14, 25] */ } /* [13, 14, 25] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 25] */ #pragma omp barrier /* [13, 14, 26] */ /* [13, 14, 26] */ /* [13, 14, 26] */ /* [13, 14, 26] */ for (k = nz - 2; k >= 1; k--) { /* [13, 14, 26] */ /* [13, 14, 26] */ jacu(k); /* [13, 14, 26] */ /* [13, 14, 26] */ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /* [13, 14, 26] */ } /* [13, 14, 26] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 26] */ #pragma omp barrier /* [13, 14, 27] */ #pragma omp for nowait /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (i = ist; i <= iend; i++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* 
[13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (j = jst; j <= jend; j++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (k = 1; k <= nz - 2; k++) { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (m = 0; m < 5; m++) { /* [13, 14, 27] */ /* [13, 14, 27] */ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* [13, 14, 27] */ /* [13, 14, 27] */ if (istep % inorm == 0) { /* [13, 14, 27] */ /* [13, 14, 27] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 27] */ double *sum; /* [13, 14, 27] */ v = rsd; /* [13, 14, 27] */ sum = delunm; /* [13, 14, 27] */ int i_imopVarPre89; /* [13, 14, 27] */ int j_imopVarPre90; /* [13, 14, 27] */ int k_imopVarPre91; /* [13, 14, 27] */ int m_imopVarPre92; /* [13, 14, 27] */ double sum0 = 0.0; /* [13, 14, 27] */ double sum1 = 0.0; /* [13, 14, 27] */ double sum2 = 0.0; /* [13, 14, 27] */ double sum3 = 0.0; /* [13, 14, 27] */ double sum4 = 0.0; /* [13, 14, 27] */ #pragma omp single nowait { /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ /* [13, 14, 27] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [13, 14, 27] */ /* [13, 14, 27] */ sum[m_imopVarPre92] = 0.0; } } /* [13, 14, 27] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 27] */ #pragma omp barrier /* [13, 14, 28] */ #pragma omp for nowait /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ /* [13, 14, 28] */ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /* [13, 14, 28] */ /* [13, 14, 28] */ sum0 = sum0 + 
v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /* [13, 14, 28] */ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /* [13, 14, 28] */ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /* [13, 14, 28] */ sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /* [13, 14, 28] */ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /* [13, 14, 28] */ // #pragma omp dummyFlush CRITICAL_START /* [13, 14, 28] */ #pragma omp critical { /* [13, 14, 28] */ /* [13, 14, 28] */ sum[0] += sum0; /* [13, 14, 28] */ sum[1] += sum1; /* [13, 14, 28] */ sum[2] += sum2; /* [13, 14, 28] */ sum[3] += sum3; /* [13, 14, 28] */ sum[4] += sum4; } /* [13, 14, 28] */ // #pragma omp dummyFlush CRITICAL_END /* [13, 14, 28] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 28] */ #pragma omp barrier /* [13, 14, 29] */ #pragma omp single nowait { /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ /* [13, 14, 29] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [13, 14, 29] */ /* [13, 14, 29] */ double _imopVarPre154; /* [13, 14, 29] */ double _imopVarPre155; /* [13, 14, 29] */ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 29] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 29] */ /* [13, 14, 29] */ sum[m_imopVarPre92] = _imopVarPre155; } } /* [13, 14, 29] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 29] */ #pragma omp barrier /* [13, 14, 30] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 30] */ #pragma omp barrier } /* [13, 14, 27, 31] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 27, 31] */ #pragma omp barrier /* [13, 14, 28, 
32] */ int i_imopVarPre79; /* [13, 14, 28, 32] */ int j_imopVarPre80; /* [13, 14, 28, 32] */ int k_imopVarPre81; /* [13, 14, 28, 32] */ int m_imopVarPre82; /* [13, 14, 28, 32] */ int L1; /* [13, 14, 28, 32] */ int L2; /* [13, 14, 28, 32] */ int ist1; /* [13, 14, 28, 32] */ int iend1; /* [13, 14, 28, 32] */ int jst1; /* [13, 14, 28, 32] */ int jend1; /* [13, 14, 28, 32] */ double q; /* [13, 14, 28, 32] */ double u21; /* [13, 14, 28, 32] */ double u31; /* [13, 14, 28, 32] */ double u41; /* [13, 14, 28, 32] */ double tmp_imopVarPre83; /* [13, 14, 28, 32] */ double u21i; /* [13, 14, 28, 32] */ double u31i; /* [13, 14, 28, 32] */ double u41i; /* [13, 14, 28, 32] */ double u51i; /* [13, 14, 28, 32] */ double u21j; /* [13, 14, 28, 32] */ double u31j; /* [13, 14, 28, 32] */ double u41j; /* [13, 14, 28, 32] */ double u51j; /* [13, 14, 28, 32] */ double u21k; /* [13, 14, 28, 32] */ double u31k; /* [13, 14, 28, 32] */ double u41k; /* [13, 14, 28, 32] */ double u51k; /* [13, 14, 28, 32] */ double u21im1; /* [13, 14, 28, 32] */ double u31im1; /* [13, 14, 28, 32] */ double u41im1; /* [13, 14, 28, 32] */ double u51im1; /* [13, 14, 28, 32] */ double u21jm1; /* [13, 14, 28, 32] */ double u31jm1; /* [13, 14, 28, 32] */ double u41jm1; /* [13, 14, 28, 32] */ double u51jm1; /* [13, 14, 28, 32] */ double u21km1; /* [13, 14, 28, 32] */ double u31km1; /* [13, 14, 28, 32] */ double u41km1; /* [13, 14, 28, 32] */ double u51km1; /* [13, 14, 28, 32] */ #pragma omp for nowait /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 28, 
32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 28, 32] */ /* [13, 14, 28, 32] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /* [13, 14, 28, 32] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 28, 32] */ #pragma omp barrier /* [13, 14, 29, 33] */ L1 = 0; /* [13, 14, 29, 33] */ L2 = nx - 1; /* [13, 14, 29, 33] */ #pragma omp for nowait /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 29, 33] */ /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 29, 33] */ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 29, 33] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /* [13, 14, 29, 33] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /* [13, 14, 29, 33] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 29, 33] */ #pragma omp barrier /* [13, 14, 30, 34] */ #pragma omp for nowait /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 30, 34] */ L2 = nx - 1; /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ tmp_imopVarPre83 = 1.0 / 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 30, 34] */ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 30, 34] */ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 30, 34] */ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 30, 34] */ u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 30, 34] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 30, 34] */ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 30, 34] */ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 30, 34] */ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 30, 34] */ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /* [13, 14, 30, 34] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + 
dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * 
u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 30, 34] */ rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /* [13, 14, 30, 34] */ ist1 = 3; /* [13, 14, 30, 34] */ iend1 = nx - 4; /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* 
[13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 30, 34] */ /* [13, 14, 30, 34] */ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 30, 34] */ rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /* [13, 14, 30, 34] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 30, 34] */ #pragma omp barrier /* [13, 14, 31, 35] */ L1 = 0; /* [13, 14, 31, 35] */ L2 = ny - 1; /* [13, 14, 31, 35] */ #pragma omp for nowait /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 31, 35] */ /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 31, 35] */ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 31, 35] */ q = 0.50 
* (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /* [13, 14, 31, 35] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /* [13, 14, 31, 35] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 31, 35] */ #pragma omp barrier /* [13, 14, 32, 36] */ #pragma omp for nowait /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ 
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 32, 36] */ L2 = ny - 1; /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 32, 36] */ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 32, 36] */ u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 32, 36] */ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 32, 36] */ u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 32, 36] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /* [13, 14, 32, 36] */ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /* [13, 14, 32, 36] */ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /* [13, 14, 32, 36] */ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /* [13, 14, 32, 36] */ u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /* [13, 14, 32, 36] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 
1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /* [13, 14, 32, 36] */ jst1 = 3; /* [13, 14, 32, 36] */ jend1 = ny - 4; /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; 
m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 32, 36] */ /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /* [13, 14, 32, 36] */ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /* [13, 14, 32, 36] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 32, 36] */ #pragma omp barrier /* [13, 14, 33, 37] */ #pragma omp for nowait /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (j_imopVarPre80 = jst; 
j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 33, 37] */ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] 
*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [13, 14, 33, 37] */ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [13, 14, 33, 37] */ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [13, 14, 33, 37] */ u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [13, 14, 33, 37] */ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [13, 14, 33, 37] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /* [13, 14, 33, 37] */ u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /* [13, 14, 33, 37] */ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /* [13, 14, 33, 37] */ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /* [13, 14, 33, 37] */ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [13, 14, 33, 37] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * 
(((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * 
(u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ 
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [13, 14, 33, 37] */ /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /* [13, 14, 33, 37] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /* [13, 14, 33, 37] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 33, 37] */ #pragma omp barrier /* [13, 14, 34, 38] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 34, 38] */ #pragma omp barrier /* [13, 14, 35, 39] */ #pragma omp master { /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ _imopVarPre372 = (istep % inorm == 0); /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ if (!_imopVarPre372) { /* [13, 14, 35, 39] */ /* [13, 14, 35, 39] */ _imopVarPre372 = (istep == itmax); } } /* [13, 14, 35, 39] */ // 
#pragma omp dummyFlush BARRIER_START /* [13, 14, 35, 39] */ #pragma omp barrier /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ if (_imopVarPre372) { /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [13, 14, 36, 40] */ double *sum; /* [13, 14, 36, 40] */ v = rsd; /* [13, 14, 36, 40] */ sum = rsdnm; /* [13, 14, 36, 40] */ int i_imopVarPre93; /* [13, 14, 36, 40] */ int j_imopVarPre94; /* [13, 14, 36, 40] */ int k_imopVarPre95; /* [13, 14, 36, 40] */ int m_imopVarPre96; /* [13, 14, 36, 40] */ double sum0 = 0.0; /* [13, 14, 36, 40] */ double sum1 = 0.0; /* [13, 14, 36, 40] */ double sum2 = 0.0; /* [13, 14, 36, 40] */ double sum3 = 0.0; /* [13, 14, 36, 40] */ double sum4 = 0.0; /* [13, 14, 36, 40] */ #pragma omp single nowait { /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [13, 14, 36, 40] */ /* [13, 14, 36, 40] */ sum[m_imopVarPre96] = 0.0; } } /* [13, 14, 36, 40] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 36, 40] */ #pragma omp barrier /* [13, 14, 37] */ #pragma omp for nowait /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ /* [13, 14, 37] */ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /* [13, 14, 37] */ /* [13, 14, 37] */ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /* [13, 14, 37] */ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /* [13, 14, 37] */ sum2 = sum2 + 
v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /* [13, 14, 37] */ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /* [13, 14, 37] */ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /* [13, 14, 37] */ // #pragma omp dummyFlush CRITICAL_START /* [13, 14, 37] */ #pragma omp critical { /* [13, 14, 37] */ /* [13, 14, 37] */ sum[0] += sum0; /* [13, 14, 37] */ sum[1] += sum1; /* [13, 14, 37] */ sum[2] += sum2; /* [13, 14, 37] */ sum[3] += sum3; /* [13, 14, 37] */ sum[4] += sum4; } /* [13, 14, 37] */ // #pragma omp dummyFlush CRITICAL_END /* [13, 14, 37] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 37] */ #pragma omp barrier /* [13, 14, 38] */ #pragma omp single nowait { /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ /* [13, 14, 38] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [13, 14, 38] */ /* [13, 14, 38] */ double _imopVarPre154; /* [13, 14, 38] */ double _imopVarPre155; /* [13, 14, 38] */ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [13, 14, 38] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [13, 14, 38] */ /* [13, 14, 38] */ sum[m_imopVarPre96] = _imopVarPre155; } } /* [13, 14, 38] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 38] */ #pragma omp barrier /* [13, 14, 39] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 39] */ #pragma omp barrier } /* [13, 14, 36, 40] */ // #pragma omp dummyFlush BARRIER_START /* [13, 14, 36, 40] */ #pragma omp barrier /* [13, 14, 37] */ #pragma omp master { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre377) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre378 = (rsdnm[1] < tolrsd[1]); /* [13, 14, 37] */ /* [13, 14, 37] */ if 
(_imopVarPre378) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre379) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre380 = (rsdnm[3] < tolrsd[3]); /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre380) { /* [13, 14, 37] */ /* [13, 14, 37] */ _imopVarPre380 = (rsdnm[4] < tolrsd[4]); } /* [13, 14, 37] */ _imopVarPre379 = _imopVarPre380; } /* [13, 14, 37] */ _imopVarPre378 = _imopVarPre379; } /* [13, 14, 37] */ _imopVarPre377 = _imopVarPre378; } /* [13, 14, 37] */ /* [13, 14, 37] */ if (_imopVarPre377) { /* [13, 14, 37] */ /* [13, 14, 37] */ exit(1); /* [13, 14, 37] */ } } } } /* [13, 14] */ timer_stop(1); /* [13, 14] */ /* [13, 14] */ maxtime = timer_read(1); /* [13, 14] */ /* [] */ error(); /* [] */ /* [] */ pintgr(); /* [] */ /* [] */ int *_imopVarPre144; /* [] */ char *_imopVarPre145; /* [] */ _imopVarPre144 = &verified; /* [] */ _imopVarPre145 = &class; /* [] */ verify(rsdnm, errnm, frc, _imopVarPre145, _imopVarPre144); /* [] */ /* [] */ mflops = (double)itmax *(1984.77 * (double)nx0 * (double)ny0 * (double)nz0 - 10923.3 * (((double)(nx0 + ny0 + nz0) / 3.0) * ((double)(nx0 + ny0 + nz0) / 3.0)) + 27770.9 * (double)(nx0 + ny0 + nz0) / 3.0 - 144010.0) / (maxtime * 1000000.0); /* [] */ c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)"); /* [] */ } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ static void blts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 
2 * 2 + 1][12 / 2 * 2 + 1][5], double ldz[12][12][5][5], double ldy[12][12][5][5], double ldx[12][12][5][5], double d[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ int i; /* [13, 14, 25, 41] */ int j; /* [13, 14, 25, 41] */ int m; /* [13, 14, 25, 41] */ double tmp; /* [13, 14, 25, 41] */ double tmp1; /* [13, 14, 25, 41] */ double tmat[5][5]; /* [13, 14, 25, 41] */ #pragma omp for nowait schedule(static) /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (i = ist; i <= iend; i++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (j = jst; j <= jend; j++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (m = 0; m < 5; m++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]); } } } /* [13, 14, 25, 41] */ #pragma omp for nowait schedule(static) /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (i = ist; i <= iend; i++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ if (i != ist) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ while (flag[i - 1] == 0) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 25, 41] */ #pragma omp flush(flag) /* [13, 14, 25, 41] */ ; } } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ if (i != iend) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ while (flag[i] == 1) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 25, 41] */ #pragma omp flush(flag) /* [13, 14, 25, 41] */ ; } } /* [13, 14, 25, 41] */ /* 
[13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (j = jst; j <= jend; j++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (m = 0; m < 5; m++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]); } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ for (m = 0; m < 5; m++) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ tmat[m][0] = d[i][j][m][0]; /* [13, 14, 25, 41] */ tmat[m][1] = d[i][j][m][1]; /* [13, 14, 25, 41] */ tmat[m][2] = d[i][j][m][2]; /* [13, 14, 25, 41] */ tmat[m][3] = d[i][j][m][3]; /* [13, 14, 25, 41] */ tmat[m][4] = d[i][j][m][4]; } /* [13, 14, 25, 41] */ tmp1 = 1.0 / tmat[0][0]; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[1][0]; /* [13, 14, 25, 41] */ tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; /* [13, 14, 25, 41] */ tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; /* [13, 14, 25, 41] */ tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; /* [13, 14, 25, 41] */ tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; /* [13, 14, 25, 41] */ v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[2][0]; /* [13, 14, 25, 41] */ tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; /* [13, 14, 25, 41] */ tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; /* [13, 14, 25, 41] */ tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; /* [13, 14, 25, 41] */ tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; /* [13, 14, 25, 41] */ v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[3][0]; /* [13, 14, 
25, 41] */ tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; /* [13, 14, 25, 41] */ tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; /* [13, 14, 25, 41] */ tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; /* [13, 14, 25, 41] */ tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; /* [13, 14, 25, 41] */ v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[4][0]; /* [13, 14, 25, 41] */ tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; /* [13, 14, 25, 41] */ tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; /* [13, 14, 25, 41] */ tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; /* [13, 14, 25, 41] */ tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; /* [13, 14, 25, 41] */ v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; /* [13, 14, 25, 41] */ tmp1 = 1.0 / tmat[1][1]; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[2][1]; /* [13, 14, 25, 41] */ tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; /* [13, 14, 25, 41] */ tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; /* [13, 14, 25, 41] */ tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; /* [13, 14, 25, 41] */ v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[3][1]; /* [13, 14, 25, 41] */ tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; /* [13, 14, 25, 41] */ tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; /* [13, 14, 25, 41] */ tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; /* [13, 14, 25, 41] */ v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[4][1]; /* [13, 14, 25, 41] */ tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; /* [13, 14, 25, 41] */ tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; /* [13, 14, 25, 41] */ tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; /* [13, 14, 25, 41] */ v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; /* [13, 14, 25, 41] */ tmp1 = 1.0 / tmat[2][2]; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[3][2]; /* [13, 14, 25, 41] */ tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; /* [13, 14, 25, 41] */ tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; /* [13, 14, 25, 41] */ 
v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[4][2]; /* [13, 14, 25, 41] */ tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; /* [13, 14, 25, 41] */ tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; /* [13, 14, 25, 41] */ v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; /* [13, 14, 25, 41] */ tmp1 = 1.0 / tmat[3][3]; /* [13, 14, 25, 41] */ tmp = tmp1 * tmat[4][3]; /* [13, 14, 25, 41] */ tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; /* [13, 14, 25, 41] */ v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /* [13, 14, 25, 41] */ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; /* [13, 14, 25, 41] */ v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; /* [13, 14, 25, 41] */ v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; /* [13, 14, 25, 41] */ v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; /* [13, 14, 25, 41] */ v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; /* [13, 14, 25, 41] */ v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; /* [13, 14, 25, 41] */ v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; /* [13, 14, 25, 41] */ v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; /* [13, 14, 25, 41] */ v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ if (i != ist) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ flag[i - 1] = 0; } /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ if (i != iend) { /* [13, 14, 25, 41] */ /* [13, 14, 25, 41] */ flag[i] = 1; } /* [13, 14, 25, 41] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 25, 41] */ #pragma omp flush(flag) } } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 
26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ static void buts(int nx, int ny, int nz, int k, double omega, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double tv[12][12][5], double d[12][12][5][5], double udx[12][12][5][5], double udy[12][12][5][5], double udz[12][12][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ int i; /* [13, 14, 26, 42] */ int j; /* [13, 14, 26, 42] */ int m; /* [13, 14, 26, 42] */ double tmp; /* [13, 14, 26, 42] */ double tmp1; /* [13, 14, 26, 42] */ double tmat[5][5]; /* [13, 14, 26, 42] */ #pragma omp for nowait schedule(static) /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (i = iend; i >= ist; i--) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (j = jend; j >= jst; j--) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (m = 0; m < 5; m++) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]); } } } /* [13, 14, 26, 42] */ #pragma omp for nowait schedule(static) /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (i = iend; i >= ist; i--) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ if (i != iend) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ while (flag[i + 1] == 0) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 26, 42] */ #pragma omp flush(flag) /* [13, 14, 26, 42] */ ; } } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ if (i != ist) { /* [13, 14, 26, 42] */ 
/* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ while (flag[i] == 1) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 26, 42] */ #pragma omp flush(flag) /* [13, 14, 26, 42] */ ; } } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (j = jend; j >= jst; j--) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (m = 0; m < 5; m++) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]); } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ for (m = 0; m < 5; m++) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ tmat[m][0] = d[i][j][m][0]; /* [13, 14, 26, 42] */ tmat[m][1] = d[i][j][m][1]; /* [13, 14, 26, 42] */ tmat[m][2] = d[i][j][m][2]; /* [13, 14, 26, 42] */ tmat[m][3] = d[i][j][m][3]; /* [13, 14, 26, 42] */ tmat[m][4] = d[i][j][m][4]; } /* [13, 14, 26, 42] */ tmp1 = 1.0 / tmat[0][0]; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[1][0]; /* [13, 14, 26, 42] */ tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; /* [13, 14, 26, 42] */ tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; /* [13, 14, 26, 42] */ tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; /* [13, 14, 26, 42] */ tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; /* [13, 14, 26, 42] */ tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[2][0]; /* [13, 14, 26, 42] */ tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; /* [13, 14, 26, 42] */ tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; /* [13, 14, 26, 42] */ 
tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; /* [13, 14, 26, 42] */ tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; /* [13, 14, 26, 42] */ tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[3][0]; /* [13, 14, 26, 42] */ tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; /* [13, 14, 26, 42] */ tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; /* [13, 14, 26, 42] */ tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; /* [13, 14, 26, 42] */ tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; /* [13, 14, 26, 42] */ tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[4][0]; /* [13, 14, 26, 42] */ tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; /* [13, 14, 26, 42] */ tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; /* [13, 14, 26, 42] */ tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; /* [13, 14, 26, 42] */ tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; /* [13, 14, 26, 42] */ tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; /* [13, 14, 26, 42] */ tmp1 = 1.0 / tmat[1][1]; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[2][1]; /* [13, 14, 26, 42] */ tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; /* [13, 14, 26, 42] */ tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; /* [13, 14, 26, 42] */ tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; /* [13, 14, 26, 42] */ tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[3][1]; /* [13, 14, 26, 42] */ tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; /* [13, 14, 26, 42] */ tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; /* [13, 14, 26, 42] */ tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; /* [13, 14, 26, 42] */ tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[4][1]; /* [13, 14, 26, 42] */ tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; /* [13, 14, 26, 42] */ tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; /* [13, 14, 26, 42] */ tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; /* [13, 14, 26, 42] */ tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; /* [13, 14, 26, 42] */ tmp1 = 1.0 / 
tmat[2][2]; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[3][2]; /* [13, 14, 26, 42] */ tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; /* [13, 14, 26, 42] */ tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; /* [13, 14, 26, 42] */ tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[4][2]; /* [13, 14, 26, 42] */ tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; /* [13, 14, 26, 42] */ tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; /* [13, 14, 26, 42] */ tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; /* [13, 14, 26, 42] */ tmp1 = 1.0 / tmat[3][3]; /* [13, 14, 26, 42] */ tmp = tmp1 * tmat[4][3]; /* [13, 14, 26, 42] */ tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; /* [13, 14, 26, 42] */ tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /* [13, 14, 26, 42] */ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; /* [13, 14, 26, 42] */ tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; /* [13, 14, 26, 42] */ tv[i][j][3] = tv[i][j][3] / tmat[3][3]; /* [13, 14, 26, 42] */ tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; /* [13, 14, 26, 42] */ tv[i][j][2] = tv[i][j][2] / tmat[2][2]; /* [13, 14, 26, 42] */ tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; /* [13, 14, 26, 42] */ tv[i][j][1] = tv[i][j][1] / tmat[1][1]; /* [13, 14, 26, 42] */ tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; /* [13, 14, 26, 42] */ tv[i][j][0] = tv[i][j][0] / tmat[0][0]; /* [13, 14, 26, 42] */ v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; /* [13, 14, 26, 42] */ v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; /* [13, 14, 26, 42] */ v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; /* [13, 14, 26, 42] */ v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; /* [13, 14, 26, 42] */ v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ if (i != iend) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ flag[i + 1] = 
0; } /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ if (i != ist) { /* [13, 14, 26, 42] */ /* [13, 14, 26, 42] */ flag[i] = 1; } /* [13, 14, 26, 42] */ // #pragma omp dummyFlush FLUSH_START /* [13, 14, 26, 42] */ #pragma omp flush(flag) } } /* [] */ static void domain() { /* [] */ /* [] */ nx = nx0; /* [] */ ny = ny0; /* [] */ nz = nz0; /* [] */ int _imopVarPre146; /* [] */ int _imopVarPre147; /* [] */ _imopVarPre146 = nx < 4; /* [] */ /* [] */ if (!_imopVarPre146) { /* [] */ /* [] */ _imopVarPre147 = ny < 4; /* [] */ /* [] */ if (!_imopVarPre147) { /* [] */ /* [] */ _imopVarPre147 = nz < 4; } /* [] */ _imopVarPre146 = _imopVarPre147; } /* [] */ /* [] */ if (_imopVarPre146) { /* [] */ /* [] */ printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ int _imopVarPre148; /* [] */ int _imopVarPre149; /* [] */ _imopVarPre148 = nx > 12; /* [] */ /* [] */ if (!_imopVarPre148) { /* [] */ /* [] */ _imopVarPre149 = ny > 12; /* [] */ /* [] */ if (!_imopVarPre149) { /* [] */ /* [] */ _imopVarPre149 = nz > 12; } /* [] */ _imopVarPre148 = _imopVarPre149; } /* [] */ /* [] */ if (_imopVarPre148) { /* [] */ /* [] */ printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. 
THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ ist = 1; /* [] */ iend = nx - 2; /* [] */ jst = 1; /* [] */ jend = ny - 2; } /* [] */ static void erhs() { /* [] */ /* [43] */ #pragma omp parallel { /* [43] */ /* [43] */ int i; /* [43] */ int j; /* [43] */ int k; /* [43] */ int m; /* [43] */ int iglob; /* [43] */ int jglob; /* [43] */ int L1; /* [43] */ int L2; /* [43] */ int ist1; /* [43] */ int iend1; /* [43] */ int jst1; /* [43] */ int jend1; /* [43] */ double dsspm; /* [43] */ double xi; /* [43] */ double eta; /* [43] */ double zeta; /* [43] */ double q; /* [43] */ double u21; /* [43] */ double u31; /* [43] */ double u41; /* [43] */ double tmp; /* [43] */ double u21i; /* [43] */ double u31i; /* [43] */ double u41i; /* [43] */ double u51i; /* [43] */ double u21j; /* [43] */ double u31j; /* [43] */ double u41j; /* [43] */ double u51j; /* [43] */ double u21k; /* [43] */ double u31k; /* [43] */ double u41k; /* [43] */ double u51k; /* [43] */ double u21im1; /* [43] */ double u31im1; /* [43] */ double u41im1; /* [43] */ double u51im1; /* [43] */ double u21jm1; /* [43] */ double u31jm1; /* [43] */ double u41jm1; /* [43] */ double u51jm1; /* [43] */ double u21km1; /* [43] */ double u31km1; /* [43] */ double u41km1; /* [43] */ double u51km1; /* [43] */ dsspm = dssp; /* [43] */ #pragma omp for nowait /* [43] */ /* [43] */ /* [43] */ for (i = 0; i < nx; i++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (j = 0; j < ny; j++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (k = 0; k < nz; k++) { /* [43] */ /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (m = 0; m < 5; m++) { /* [43] */ /* [43] */ frct[i][j][k][m] = 0.0; } } } } /* [43] */ #pragma omp for nowait /* [43] */ /* [43] */ /* [43] */ for (i = 0; i < nx; i++) { /* [43] */ /* [43] */ iglob = i; /* [43] */ xi = ((double)iglob) / (nx0 - 1); /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (j = 0; j < ny; j++) { /* [43] */ /* [43] */ jglob = j; 
/* [43] */ eta = ((double)jglob) / (ny0 - 1); /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (k = 0; k < nz; k++) { /* [43] */ /* [43] */ zeta = ((double)k) / (nz - 1); /* [43] */ /* [43] */ /* [43] */ /* [43] */ for (m = 0; m < 5; m++) { /* [43] */ /* [43] */ rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /* [43] */ // #pragma omp dummyFlush BARRIER_START /* [43] */ #pragma omp barrier /* [44] */ L1 = 0; /* [44] */ L2 = nx - 1; /* [44] */ #pragma omp for nowait /* [44] */ /* [44] */ /* [44] */ for (i = L1; i <= L2; i++) { /* [44] */ /* [44] */ /* [44] */ /* [44] */ /* [44] */ for (j = jst; j <= jend; j++) { /* [44] */ /* [44] */ /* [44] */ /* [44] */ /* [44] */ for (k = 1; k < nz - 1; k++) { /* [44] */ /* [44] */ flux[i][j][k][0] = rsd[i][j][k][1]; /* [44] */ u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; /* [44] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [44] */ flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [44] */ flux[i][j][k][2] = rsd[i][j][k][2] * u21; /* [44] */ flux[i][j][k][3] = rsd[i][j][k][3] * u21; /* [44] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [44] */ // #pragma omp dummyFlush BARRIER_START /* [44] */ #pragma omp barrier /* [45] */ #pragma omp for nowait /* [45] */ /* [45] */ /* [45] */ for (j = jst; j <= jend; j++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (k = 1; k <= nz - 2; k++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= iend; i++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* 
[45] */ frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= L2; i++) { /* [45] */ /* [45] */ tmp = 1.0 / rsd[i][j][k][0]; /* [45] */ u21i = tmp * rsd[i][j][k][1]; /* [45] */ u31i = tmp * rsd[i][j][k][2]; /* [45] */ u41i = tmp * rsd[i][j][k][3]; /* [45] */ u51i = tmp * rsd[i][j][k][4]; /* [45] */ tmp = 1.0 / rsd[i - 1][j][k][0]; /* [45] */ u21im1 = tmp * rsd[i - 1][j][k][1]; /* [45] */ u31im1 = tmp * rsd[i - 1][j][k][2]; /* [45] */ u41im1 = tmp * rsd[i - 1][j][k][3]; /* [45] */ u51im1 = tmp * rsd[i - 1][j][k][4]; /* [45] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [45] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [45] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [45] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist; i <= iend; i++) { /* [45] */ /* [45] */ frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); /* [45] */ frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); /* [45] */ frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); /* [45] */ frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); /* [45] */ frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - 
flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); /* [45] */ frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } /* [45] */ ist1 = 3; /* [45] */ iend1 = nx - 4; /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (i = ist1; i <= iend1; i++) { /* [45] */ /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } /* [45] */ /* [45] */ /* [45] */ /* [45] */ for (m = 0; m < 5; m++) { /* [45] */ /* [45] */ frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); /* [45] */ frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /* [45] */ // #pragma omp dummyFlush BARRIER_START /* [45] */ #pragma omp barrier /* [46] */ L1 = 0; /* [46] */ L2 = ny - 1; /* [46] */ #pragma omp for nowait /* [46] */ /* [46] */ /* [46] */ for (i = ist; i <= iend; i++) { /* [46] */ /* [46] */ /* [46] */ /* [46] */ /* [46] */ for (j = L1; j <= L2; j++) { /* [46] */ /* [46] */ /* [46] */ /* [46] */ /* [46] */ for (k = 1; k <= nz - 2; k++) { /* [46] */ /* [46] */ flux[i][j][k][0] = rsd[i][j][k][2]; /* [46] */ u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; /* [46] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [46] */ flux[i][j][k][1] = rsd[i][j][k][1] * u31; /* [46] */ 
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [46] */ flux[i][j][k][3] = rsd[i][j][k][3] * u31; /* [46] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [46] */ // #pragma omp dummyFlush BARRIER_START /* [46] */ #pragma omp barrier /* [47] */ #pragma omp for nowait /* [47] */ /* [47] */ /* [47] */ for (i = ist; i <= iend; i++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (k = 1; k <= nz - 2; k++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= jend; j++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= L2; j++) { /* [47] */ /* [47] */ tmp = 1.0 / rsd[i][j][k][0]; /* [47] */ u21j = tmp * rsd[i][j][k][1]; /* [47] */ u31j = tmp * rsd[i][j][k][2]; /* [47] */ u41j = tmp * rsd[i][j][k][3]; /* [47] */ u51j = tmp * rsd[i][j][k][4]; /* [47] */ tmp = 1.0 / rsd[i][j - 1][k][0]; /* [47] */ u21jm1 = tmp * rsd[i][j - 1][k][1]; /* [47] */ u31jm1 = tmp * rsd[i][j - 1][k][2]; /* [47] */ u41jm1 = tmp * rsd[i][j - 1][k][3]; /* [47] */ u51jm1 = tmp * rsd[i][j - 1][k][4]; /* [47] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [47] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [47] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [47] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst; j <= jend; j++) { /* [47] */ /* [47] */ frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); /* [47] */ frct[i][j][k][1] = 
frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); /* [47] */ frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); /* [47] */ frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); /* [47] */ frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); /* [47] */ frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } /* [47] */ jst1 = 3; /* [47] */ jend1 = ny - 4; /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (j = jst1; j <= jend1; j++) { /* [47] */ /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } /* [47] */ /* [47] */ /* [47] */ /* [47] */ for (m = 0; m < 5; m++) { /* [47] */ /* [47] */ frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); /* [47] */ frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /* [47] */ // #pragma omp dummyFlush 
BARRIER_START /* [47] */ #pragma omp barrier /* [48] */ #pragma omp for nowait /* [48] */ /* [48] */ /* [48] */ for (i = ist; i <= iend; i++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (j = jst; j <= jend; j++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 0; k <= nz - 1; k++) { /* [48] */ /* [48] */ flux[i][j][k][0] = rsd[i][j][k][3]; /* [48] */ u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; /* [48] */ q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; /* [48] */ flux[i][j][k][1] = rsd[i][j][k][1] * u41; /* [48] */ flux[i][j][k][2] = rsd[i][j][k][2] * u41; /* [48] */ flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); /* [48] */ flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 2; k++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 1; k++) { /* [48] */ /* [48] */ tmp = 1.0 / rsd[i][j][k][0]; /* [48] */ u21k = tmp * rsd[i][j][k][1]; /* [48] */ u31k = tmp * rsd[i][j][k][2]; /* [48] */ u41k = tmp * rsd[i][j][k][3]; /* [48] */ u51k = tmp * rsd[i][j][k][4]; /* [48] */ tmp = 1.0 / rsd[i][j][k - 1][0]; /* [48] */ u21km1 = tmp * rsd[i][j][k - 1][1]; /* [48] */ u31km1 = tmp * rsd[i][j][k - 1][2]; /* [48] */ u41km1 = tmp * rsd[i][j][k - 1][3]; /* [48] */ u51km1 = tmp * rsd[i][j][k - 1][4]; /* [48] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [48] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [48] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [48] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 
(1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 1; k <= nz - 2; k++) { /* [48] */ /* [48] */ frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); /* [48] */ frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); /* [48] */ frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); /* [48] */ frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); /* [48] */ frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); /* [48] */ frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (k = 3; k <= nz - 4; k++) { /* [48] */ /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } /* [48] */ /* [48] */ /* [48] */ /* [48] */ for (m = 0; m < 5; m++) { /* [48] */ /* [48] */ frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * 
(rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
                    /* one-sided 4th-order dissipation stencil at the k = nz-2 boundary */
                    frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
                }
            }
        }
    }
}

/*
 * error: accumulate, for each of the 5 flow components, the RMS difference
 * between the computed solution u and the analytic solution produced by
 * exact() over the interior grid points, leaving the results in the global
 * array errnm.
 *
 * Reads globals: u, ist/iend, jst/jend, nz, nx0/ny0/nz0.
 * Writes global:  errnm[0..4].
 * Contains no OpenMP worksharing directives — runs as plain serial code.
 */
static void error() {
    int i;
    int j;
    int k;
    int m;
    int iglob;
    int jglob;
    double tmp;
    double u000ijk[5];
    for (m = 0; m < 5; m++) {
        errnm[m] = 0.0;
    }
    for (i = ist; i <= iend; i++) {
        iglob = i; /* local index == global index in this shared-memory version */
        for (j = jst; j <= jend; j++) {
            jglob = j;
            for (k = 1; k <= nz - 2; k++) {
                exact(iglob, jglob, k, u000ijk);
                for (m = 0; m < 5; m++) {
                    tmp = (u000ijk[m] - u[i][j][k][m]);
                    /* sum of squared pointwise errors per component */
                    errnm[m] = errnm[m] + tmp * tmp;
                }
            }
        }
    }
    /* convert each accumulated sum into an RMS norm over the interior grid */
    for (m = 0; m < 5; m++) {
        /* _imopVarPre* names are tool-generated temporaries; kept as-is */
        double _imopVarPre151;
        double _imopVarPre152;
        _imopVarPre151 = errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
        _imopVarPre152 = sqrt(_imopVarPre151);
        errnm[m] = _imopVarPre152;
    }
}

/*
 * exact: evaluate the analytic (manufactured) solution at grid point
 * (i, j, k), writing the five flow components into u000ijk.  Each component
 * is a quartic polynomial in the normalized coordinates xi, eta, zeta with
 * coefficients taken from the global table ce[5][13].
 */
static void exact(int i, int j, int k, double u000ijk[5]) {
    int m;
    double xi;
    double eta;
    double zeta;
    xi
= ((double)i) / (nx0 - 1);
    eta = ((double)j) / (ny0 - 1);
    /* NOTE(review): zeta is normalized by nz, not nz0, unlike xi/eta — this
       matches the reference benchmark but is worth confirming. */
    zeta = ((double)k) / (nz - 1);
    for (m = 0; m < 5; m++) {
        u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
    }
}

/*
 * jacld: for one z-plane k, assemble the 5x5 block Jacobian matrices of the
 * implicit operator:
 *   d[i][j][..][..] — block diagonal, built from u at (i, j, k);
 *   a[i][j][..][..] — coupling to the (k-1) neighbour (z direction);
 *   b[i][j][..][..] — coupling to the (j-1) neighbour (y direction);
 *   c[i][j][..][..] — coupling to the (i-1) neighbour (x direction).
 * Presumably consumed by the lower-triangular sweep (blts) of the SSOR
 * iteration — confirm against the caller, which is outside this chunk.
 *
 * The `#pragma omp for` below is an orphaned worksharing directive: it only
 * distributes iterations when jacld() is called from inside an enclosing
 * `#pragma omp parallel` region.
 */
static void jacld(int k) {
    int i;
    int j;
    double r43;
    double c1345;
    double c34;
    double tmp1;
    double tmp2;
    double tmp3;
    r43 = (4.0 / 3.0);
    /* c1345 = C1*C3*C4*C5 and c34 = C3*C4 with the benchmark's constants
       C1=1.4, C3=0.1, C4=1.0, C5=1.4 spelled out as literals */
    c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
    c34 = 1.00e-01 * 1.00e+00;
#pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            /* tmp1..tmp3 = 1/rho, 1/rho^2, 1/rho^3 at the centre point */
            tmp1 = 1.0 / u[i][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            /* ---- block diagonal d: centre-point Jacobian ---- */
            d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
            d[i][j][0][1] = 0.0;
            d[i][j][0][2] = 0.0;
            d[i][j][0][3] = 0.0;
            d[i][j][0][4] = 0.0;
            d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
            d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
            d[i][j][1][2] = 0.0;
            d[i][j][1][3] = 0.0;
            d[i][j][1][4] = 0.0;
            d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
            d[i][j][2][1] = 0.0;
            d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
            d[i][j][2][3] = 0.0;
            d[i][j][2][4] = 0.0;
            d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
            d[i][j][3][1] = 0.0;
            d[i][j][3][2] = 0.0;
            d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
            d[i][j][3][4] = 0.0;
            d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
            d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
            d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
            d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
            d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
            /* ---- block a: Jacobian w.r.t. the (k-1) neighbour (z direction) ---- */
            tmp1 = 1.0 / u[i][j][k - 1][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            a[i][j][0][0] = -dt * tz1 * dz1;
            a[i][j][0][1] = 0.0;
            a[i][j][0][2] = 0.0;
            a[i][j][0][3] = -dt * tz2;
            a[i][j][0][4] = 0.0;
            a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);
            a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
            a[i][j][1][2] = 0.0;
            a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);
            a[i][j][1][4] = 0.0;
            a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);
            a[i][j][2][1] = 0.0;
            a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
            a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);
            a[i][j][2][4] = 0.0;
            a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);
            a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1));
            a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1));
            a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
            a[i][j][3][4] = -dt * tz2 * 0.40e+00;
            a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);
            a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];
            a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];
            a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];
            a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
            /* ---- block b: Jacobian w.r.t. the (j-1) neighbour (y direction) ---- */
            tmp1 = 1.0 / u[i][j - 1][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            b[i][j][0][0] = -dt * ty1 * dy1;
            b[i][j][0][1] = 0.0;
            b[i][j][0][2] = -dt * ty2;
            b[i][j][0][3] = 0.0;
            b[i][j][0][4] = 0.0;
            b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);
            b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
            b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);
            b[i][j][1][3] = 0.0;
            b[i][j][1][4] = 0.0;
            b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);
            b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1));
            b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
            b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1));
            b[i][j][2][4] = -dt * ty2 * 0.40e+00;
            b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);
            b[i][j][3][1] = 0.0;
            b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);
            b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
            b[i][j][3][4] = 0.0;
            b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j - 1][k][1]) * (u[i][j - 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j - 1][k][2]) * (u[i][j - 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j - 1][k][3]) * (u[i][j - 1][k][3]))) - c1345 * tmp2 * u[i][j - 1][k][4]);
            b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];
            b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];
            b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];
            b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
            /* ---- block c: Jacobian w.r.t. the (i-1) neighbour (x direction) ---- */
            tmp1 = 1.0 / u[i - 1][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            c[i][j][0][0] = -dt * tx1 * dx1;
            c[i][j][0][1] = -dt * tx2;
            c[i][j][0][2] = 0.0;
            c[i][j][0][3] = 0.0;
            c[i][j][0][4] = 0.0;
            c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);
            c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
            c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1));
            c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1));
            c[i][j][1][4] = -dt * tx2 * 0.40e+00;
            c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);
            c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);
            c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
            c[i][j][2][3] = 0.0;
            c[i][j][2][4] = 0.0;
            c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);
            c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);
            c[i][j][3][2] = 0.0;
            c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
            c[i][j][3][4] = 0.0;
            c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i - 1][j][k][1]) * (u[i - 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][2]) * (u[i - 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][3]) * (u[i - 1][j][k][3]))) - c1345 * tmp2 * u[i - 1][j][k][4]);
            c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];
            c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];
            c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
            c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
        }
    }
}

/*
 * jacu: mirror of jacld for the upper-triangular sweep — assembles the block
 * diagonal d and the (i+1)/(j+1)/(k+1) neighbour blocks a/b/c, iterating the
 * plane in reverse order.  Definition continues beyond this chunk.
 * The `#pragma omp for` is again an orphaned worksharing directive and only
 * shares work when called from inside a parallel region.
 */
static void jacu(int k) {
    int i;
    int j;
    double r43;
    double c1345;
    double c34;
    double tmp1;
    double tmp2;
    double tmp3;
    r43 = (4.0 / 3.0);
    c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
    c34 = 1.00e-01 * 1.00e+00;
#pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            tmp1 = 1.0 / u[i][j][k][0];
            tmp2 = tmp1 * tmp1;
            tmp3 = tmp1 * tmp2;
            d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
d[i][j][0][1] = 0.0; /* [13, 14, 26, 42] */ d[i][j][0][2] = 0.0; /* [13, 14, 26, 42] */ d[i][j][0][3] = 0.0; /* [13, 14, 26, 42] */ d[i][j][0][4] = 0.0; /* [13, 14, 26, 42] */ d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); /* [13, 14, 26, 42] */ d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); /* [13, 14, 26, 42] */ d[i][j][1][2] = 0.0; /* [13, 14, 26, 42] */ d[i][j][1][3] = 0.0; /* [13, 14, 26, 42] */ d[i][j][1][4] = 0.0; /* [13, 14, 26, 42] */ d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); /* [13, 14, 26, 42] */ d[i][j][2][1] = 0.0; /* [13, 14, 26, 42] */ d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); /* [13, 14, 26, 42] */ d[i][j][2][3] = 0.0; /* [13, 14, 26, 42] */ d[i][j][2][4] = 0.0; /* [13, 14, 26, 42] */ d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); /* [13, 14, 26, 42] */ d[i][j][3][1] = 0.0; /* [13, 14, 26, 42] */ d[i][j][3][2] = 0.0; /* [13, 14, 26, 42] */ d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); /* [13, 14, 26, 42] */ d[i][j][3][4] = 0.0; /* [13, 14, 26, 42] */ d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 
- c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4])); /* [13, 14, 26, 42] */ d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); /* [13, 14, 26, 42] */ d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); /* [13, 14, 26, 42] */ d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); /* [13, 14, 26, 42] */ d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /* [13, 14, 26, 42] */ tmp1 = 1.0 / u[i + 1][j][k][0]; /* [13, 14, 26, 42] */ tmp2 = tmp1 * tmp1; /* [13, 14, 26, 42] */ tmp3 = tmp1 * tmp2; /* [13, 14, 26, 42] */ a[i][j][0][0] = -dt * tx1 * dx1; /* [13, 14, 26, 42] */ a[i][j][0][1] = dt * tx2; /* [13, 14, 26, 42] */ a[i][j][0][2] = 0.0; /* [13, 14, 26, 42] */ a[i][j][0][3] = 0.0; /* [13, 14, 26, 42] */ a[i][j][0][4] = 0.0; /* [13, 14, 26, 42] */ a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]); /* [13, 14, 26, 42] */ a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; /* [13, 14, 26, 42] */ a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1)); 
/* [13, 14, 26, 42] */ a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1)); /* [13, 14, 26, 42] */ a[i][j][1][4] = dt * tx2 * 0.40e+00; /* [13, 14, 26, 42] */ a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]); /* [13, 14, 26, 42] */ a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1); /* [13, 14, 26, 42] */ a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; /* [13, 14, 26, 42] */ a[i][j][2][3] = 0.0; /* [13, 14, 26, 42] */ a[i][j][2][4] = 0.0; /* [13, 14, 26, 42] */ a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]); /* [13, 14, 26, 42] */ a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1); /* [13, 14, 26, 42] */ a[i][j][3][2] = 0.0; /* [13, 14, 26, 42] */ a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; /* [13, 14, 26, 42] */ a[i][j][3][4] = 0.0; /* [13, 14, 26, 42] */ a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i + 1][j][k][1]) * (u[i + 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][2]) * (u[i + 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][3]) * (u[i + 1][j][k][3]))) - c1345 * tmp2 * u[i + 1][j][k][4]); /* [13, 14, 26, 42] */ a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1]; /* [13, 14, 26, 42] */ a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * 
tmp2 * u[i + 1][j][k][2]; /* [13, 14, 26, 42] */ a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3]; /* [13, 14, 26, 42] */ a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /* [13, 14, 26, 42] */ tmp1 = 1.0 / u[i][j + 1][k][0]; /* [13, 14, 26, 42] */ tmp2 = tmp1 * tmp1; /* [13, 14, 26, 42] */ tmp3 = tmp1 * tmp2; /* [13, 14, 26, 42] */ b[i][j][0][0] = -dt * ty1 * dy1; /* [13, 14, 26, 42] */ b[i][j][0][1] = 0.0; /* [13, 14, 26, 42] */ b[i][j][0][2] = dt * ty2; /* [13, 14, 26, 42] */ b[i][j][0][3] = 0.0; /* [13, 14, 26, 42] */ b[i][j][0][4] = 0.0; /* [13, 14, 26, 42] */ b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]); /* [13, 14, 26, 42] */ b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; /* [13, 14, 26, 42] */ b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1); /* [13, 14, 26, 42] */ b[i][j][1][3] = 0.0; /* [13, 14, 26, 42] */ b[i][j][1][4] = 0.0; /* [13, 14, 26, 42] */ b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]); /* [13, 14, 26, 42] */ b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1)); /* [13, 14, 26, 42] */ b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; /* [13, 14, 26, 42] */ b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1)); /* [13, 14, 26, 42] */ b[i][j][2][4] = dt * ty2 * 0.40e+00; /* [13, 14, 26, 42] */ b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]); /* [13, 
14, 26, 42] */ b[i][j][3][1] = 0.0; /* [13, 14, 26, 42] */ b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1); /* [13, 14, 26, 42] */ b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; /* [13, 14, 26, 42] */ b[i][j][3][4] = 0.0; /* [13, 14, 26, 42] */ b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j + 1][k][1]) * (u[i][j + 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j + 1][k][2]) * (u[i][j + 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j + 1][k][3]) * (u[i][j + 1][k][3]))) - c1345 * tmp2 * u[i][j + 1][k][4]); /* [13, 14, 26, 42] */ b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1]; /* [13, 14, 26, 42] */ b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2]; /* [13, 14, 26, 42] */ b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3]; /* [13, 14, 26, 42] */ b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /* [13, 14, 26, 42] */ tmp1 = 1.0 / u[i][j][k + 1][0]; /* [13, 14, 26, 42] */ tmp2 = tmp1 * tmp1; /* [13, 14, 26, 42] */ tmp3 = tmp1 * tmp2; /* [13, 14, 26, 42] */ c[i][j][0][0] = -dt * tz1 * dz1; /* [13, 14, 26, 42] */ c[i][j][0][1] = 0.0; /* [13, 14, 26, 42] */ c[i][j][0][2] = 0.0; /* [13, 14, 26, 42] */ c[i][j][0][3] = dt * tz2; /* [13, 14, 26, 42] */ c[i][j][0][4] = 0.0; /* [13, 14, 26, 42] */ c[i][j][1][0] = 
dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]); /* [13, 14, 26, 42] */ c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; /* [13, 14, 26, 42] */ c[i][j][1][2] = 0.0; /* [13, 14, 26, 42] */ c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1); /* [13, 14, 26, 42] */ c[i][j][1][4] = 0.0; /* [13, 14, 26, 42] */ c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]); /* [13, 14, 26, 42] */ c[i][j][2][1] = 0.0; /* [13, 14, 26, 42] */ c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; /* [13, 14, 26, 42] */ c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1); /* [13, 14, 26, 42] */ c[i][j][2][4] = 0.0; /* [13, 14, 26, 42] */ c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]); /* [13, 14, 26, 42] */ c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1)); /* [13, 14, 26, 42] */ c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1)); /* [13, 14, 26, 42] */ c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; /* [13, 14, 26, 42] */ c[i][j][3][4] = dt * tz2 * 0.40e+00; /* [13, 14, 26, 42] */ c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k + 1][1]) * (u[i][j][k + 1][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k + 1][2]) * (u[i][j][k + 1][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k + 1][3]) 
* (u[i][j][k + 1][3]))) - c1345 * tmp2 * u[i][j][k + 1][4]); /* [13, 14, 26, 42] */ c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1]; /* [13, 14, 26, 42] */ c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2]; /* [13, 14, 26, 42] */ c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3]; /* [13, 14, 26, 42] */ c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; } } } /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* l2norm: for each of the 5 solution components m, set sum[m] = sqrt( sum of v[i][j][k][m]^2 over (ist..iend, jst..jend, 1..nz0-2) / ((nx0-2)*(ny0-2)*(nz0-2)) ), i.e. an RMS norm over the interior points. Uses orphaned OpenMP worksharing directives (for/single/critical), so it is intended to be called from inside an enclosing parallel region: each thread accumulates private partial sums and merges them under the critical section. */ static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5], double sum[5]) { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int m; /* [] */ /* sum0..sum4: per-thread partial sums of squares, one per solution component */ double sum0 = 0.0; /* [] */ double sum1 = 0.0; /* [] */ double sum2 = 0.0; /* [] */ double sum3 = 0.0; /* [] */ double sum4 = 0.0; /* [] */ /* one thread clears the shared result vector. NOTE(review): with nowait, other threads can reach the critical section below before sum[] is cleared -- confirm the callers' synchronization makes this safe. */ #pragma omp single nowait { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ sum[m] = 0.0; } } /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz0 - 2; k++) { /* [] */ /* [] */ sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0]; /* [] */ sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1]; /* [] */ sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2]; /* [] */ sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3]; /* [] */ sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4]; } } } /* [] */ // #pragma omp 
dummyFlush CRITICAL_START /* [] */ /* merge this thread's partial sums into the shared accumulator */ #pragma omp critical { /* [] */ /* [] */ sum[0] += sum0; /* [] */ sum[1] += sum1; /* [] */ sum[2] += sum2; /* [] */ sum[3] += sum3; /* [] */ sum[4] += sum4; } /* [] */ // #pragma omp dummyFlush CRITICAL_END /* [] */ /* one thread converts the accumulated sums of squares into RMS values */ #pragma omp single nowait { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ double _imopVarPre154; /* [] */ double _imopVarPre155; /* [] */ _imopVarPre154 = sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [] */ /* [] */ sum[m] = _imopVarPre155; } } } /* [] */ /* pintgr: evaluates frc = 0.25 * (frc1 + frc2 + frc3), a trapezoidal surface integral of the pressure-like quantity 0.4 * (u4 - 0.5*(u1^2 + u2^2 + u3^2)/u0) over three faces of the subdomain bounded by the global index limits ii1..ii2, ji1..ji2, ki1..ki2 (serial code; reads the globals u, nx, ny, dxi, deta, dzeta). */ static void pintgr() { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int ibeg; /* [] */ int ifin; /* [] */ int ifin1; /* [] */ int jbeg; /* [] */ int jfin; /* [] */ int jfin1; /* [] */ int iglob; /* [] */ int iglob1; /* [] */ int iglob2; /* [] */ int jglob; /* [] */ int jglob1; /* [] */ int jglob2; /* [] */ double phi1[12 + 2][12 + 2]; /* [] */ double phi2[12 + 2][12 + 2]; /* [] */ double frc1; /* [] */ double frc2; /* [] */ double frc3; /* [] */ /* clip the i-range [ibeg,ifin] of the integration subdomain against [ii1,ii2] (the short-circuit &&s were expanded into _imopVarPre* temporaries by the instrumenter) */ ibeg = nx; /* [] */ ifin = 0; /* [] */ iglob1 = -1; /* [] */ iglob2 = nx - 1; /* [] */ int _imopVarPre157; /* [] */ _imopVarPre157 = iglob1 >= ii1; /* [] */ /* [] */ if (_imopVarPre157) { /* [] */ /* [] */ _imopVarPre157 = iglob2 < ii2 + nx; } /* [] */ /* [] */ if (_imopVarPre157) { /* [] */ /* [] */ ibeg = 0; } /* [] */ int _imopVarPre159; /* [] */ _imopVarPre159 = iglob1 >= ii1 - nx; /* [] */ /* [] */ if (_imopVarPre159) { /* [] */ /* [] */ _imopVarPre159 = iglob2 <= ii2; } /* [] */ /* [] */ if (_imopVarPre159) { /* [] */ /* [] */ ifin = nx; } /* [] */ int _imopVarPre161; /* [] */ _imopVarPre161 = ii1 >= iglob1; /* [] */ /* [] */ if (_imopVarPre161) { /* [] */ /* [] */ _imopVarPre161 = ii1 <= iglob2; } /* [] */ /* [] */ if (_imopVarPre161) { /* [] */ /* [] */ ibeg = ii1; } /* [] */ int _imopVarPre163; /* [] */ _imopVarPre163 = ii2 >= iglob1; /* [] */ /* [] */ if (_imopVarPre163) { /* [] */ /* [] */ 
_imopVarPre163 = ii2 <= iglob2; } /* [] */ /* [] */ if (_imopVarPre163) { /* [] */ /* [] */ ifin = ii2; } /* [] */ /* clip the j-range [jbeg,jfin] against [ji1,ji2], same expanded-&& pattern as the i-range above */ jbeg = ny; /* [] */ jfin = -1; /* [] */ jglob1 = 0; /* [] */ jglob2 = ny - 1; /* [] */ int _imopVarPre165; /* [] */ _imopVarPre165 = jglob1 >= ji1; /* [] */ /* [] */ if (_imopVarPre165) { /* [] */ /* [] */ _imopVarPre165 = jglob2 < ji2 + ny; } /* [] */ /* [] */ if (_imopVarPre165) { /* [] */ /* [] */ jbeg = 0; } /* [] */ int _imopVarPre167; /* [] */ _imopVarPre167 = jglob1 > ji1 - ny; /* [] */ /* [] */ if (_imopVarPre167) { /* [] */ /* [] */ _imopVarPre167 = jglob2 <= ji2; } /* [] */ /* [] */ if (_imopVarPre167) { /* [] */ /* [] */ jfin = ny; } /* [] */ int _imopVarPre169; /* [] */ _imopVarPre169 = ji1 >= jglob1; /* [] */ /* [] */ if (_imopVarPre169) { /* [] */ /* [] */ _imopVarPre169 = ji1 <= jglob2; } /* [] */ /* [] */ if (_imopVarPre169) { /* [] */ /* [] */ jbeg = ji1; } /* [] */ int _imopVarPre171; /* [] */ _imopVarPre171 = ji2 >= jglob1; /* [] */ /* [] */ if (_imopVarPre171) { /* [] */ /* [] */ _imopVarPre171 = ji2 <= jglob2; } /* [] */ /* [] */ if (_imopVarPre171) { /* [] */ /* [] */ jfin = ji2; } /* [] */ /* ifin1/jfin1: last cell index for the trapezoid sums (one less than the last node when the range reaches the subdomain edge) */ ifin1 = ifin; /* [] */ jfin1 = jfin; /* [] */ /* [] */ if (ifin1 == ii2) { /* [] */ /* [] */ ifin1 = ifin - 1; } /* [] */ /* [] */ if (jfin1 == ji2) { /* [] */ /* [] */ jfin1 = jfin - 1; } /* [] */ /* clear the 14x14 phi integrand planes */ /* [] */ /* [] */ /* [] */ for (i = 0; i <= 12 + 1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= 12 + 1; k++) { /* [] */ /* [] */ phi1[i][k] = 0.0; /* [] */ phi2[i][k] = 0.0; } } /* [] */ /* first face pair: phi1/phi2 = 0.4*(energy - 0.5*|momentum|^2/density) on the k = ki1 and k = ki2 planes */ /* [] */ /* [] */ /* [] */ for (i = ibeg; i <= ifin; i++) { /* [] */ /* [] */ iglob = i; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin; j++) { /* [] */ /* [] */ jglob = j; /* [] */ k = ki1; /* [] */ phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]); /* [] */ k = ki2; /* [] */ phi2[i][j] = 0.40e+00 * 
(u[i][j][k][4] - 0.50 * (((u[i][j][k][1]) * (u[i][j][k][1])) + ((u[i][j][k][2]) * (u[i][j][k][2])) + ((u[i][j][k][3]) * (u[i][j][k][3]))) / u[i][j][k][0]); } } /* [] */ /* frc1: trapezoidal sum of both k-planes over (i,j) cells, scaled by the cell area dxi*deta */ frc1 = 0.0; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ibeg; i <= ifin1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin1; j++) { /* [] */ /* [] */ frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]); } } /* [] */ frc1 = dxi * deta * frc1; /* [] */ /* reset the phi planes for the second face pair */ /* [] */ /* [] */ /* [] */ for (i = 0; i <= 12 + 1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= 12 + 1; k++) { /* [] */ /* [] */ phi1[i][k] = 0.0; /* [] */ phi2[i][k] = 0.0; } } /* [] */ /* second face pair: (i,k) planes at j = jbeg (only when it coincides with ji1) and j = jfin (only when it coincides with ji2) */ jglob = jbeg; /* [] */ /* [] */ if (jglob == ji1) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (i = ibeg; i <= ifin; i++) { /* [] */ /* [] */ iglob = i; /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2; k++) { /* [] */ /* [] */ phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (((u[i][jbeg][k][1]) * (u[i][jbeg][k][1])) + ((u[i][jbeg][k][2]) * (u[i][jbeg][k][2])) + ((u[i][jbeg][k][3]) * (u[i][jbeg][k][3]))) / u[i][jbeg][k][0]); } } } /* [] */ jglob = jfin; /* [] */ /* [] */ if (jglob == ji2) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (i = ibeg; i <= ifin; i++) { /* [] */ /* [] */ iglob = i; /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2; k++) { /* [] */ /* [] */ phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (((u[i][jfin][k][1]) * (u[i][jfin][k][1])) + ((u[i][jfin][k][2]) * (u[i][jfin][k][2])) + ((u[i][jfin][k][3]) * (u[i][jfin][k][3]))) / u[i][jfin][k][0]); } } } /* [] */ /* frc2: trapezoidal sum of both j-planes over (i,k) cells (scaled by dxi*dzeta just past this chunk boundary) */ frc2 = 0.0; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ibeg; i <= ifin1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2 - 1; k++) { /* [] */ /* [] */ frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + 
phi2[i + 1][k + 1]); } } /* [] */ frc2 = dxi * dzeta * frc2; /* [] */ /* [] */ /* [] */ /* [] */ for (i = 0; i <= 12 + 1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= 12 + 1; k++) { /* [] */ /* [] */ phi1[i][k] = 0.0; /* [] */ phi2[i][k] = 0.0; } } /* [] */ iglob = ibeg; /* [] */ /* [] */ if (iglob == ii1) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin; j++) { /* [] */ /* [] */ jglob = j; /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2; k++) { /* [] */ /* [] */ phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (((u[ibeg][j][k][1]) * (u[ibeg][j][k][1])) + ((u[ibeg][j][k][2]) * (u[ibeg][j][k][2])) + ((u[ibeg][j][k][3]) * (u[ibeg][j][k][3]))) / u[ibeg][j][k][0]); } } } /* [] */ iglob = ifin; /* [] */ /* [] */ if (iglob == ii2) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin; j++) { /* [] */ /* [] */ jglob = j; /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2; k++) { /* [] */ /* [] */ phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (((u[ifin][j][k][1]) * (u[ifin][j][k][1])) + ((u[ifin][j][k][2]) * (u[ifin][j][k][2])) + ((u[ifin][j][k][3]) * (u[ifin][j][k][3]))) / u[ifin][j][k][0]); } } } /* [] */ frc3 = 0.0; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jbeg; j <= jfin1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = ki1; k <= ki2 - 1; k++) { /* [] */ /* [] */ frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } /* [] */ frc3 = deta * dzeta * frc3; /* [] */ frc = 0.25 * (frc1 + frc2 + frc3); } /* [] */ static void read_input() { /* [] */ /* [] */ FILE *fp; /* [] */ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - LU Benchmark\n\n"); /* [] */ /* [] */ fp = fopen("inputlu.data", "r"); /* [] */ /* [] */ /* [] */ if (fp != ((void *)0)) { /* [] */ /* [] */ printf(" Reading from input file inputlu.data\n"); /* [] */ /* [] */ int 
_imopVarPre173; /* [] */ _imopVarPre173 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre173 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre173 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre175; /* [] */ _imopVarPre175 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre175 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre175 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre178; /* [] */ int *_imopVarPre179; /* [] */ _imopVarPre178 = &inorm; /* [] */ _imopVarPre179 = &ipr; /* [] */ fscanf(fp, "%d%d", _imopVarPre179, _imopVarPre178); /* [] */ /* [] */ int _imopVarPre181; /* [] */ _imopVarPre181 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre181 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre181 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre183; /* [] */ _imopVarPre183 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre183 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre183 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre185; /* [] */ _imopVarPre185 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre185 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre185 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre187; /* [] */ _imopVarPre187 = &itmax; /* [] */ fscanf(fp, "%d", _imopVarPre187); /* [] */ /* [] */ int _imopVarPre189; /* [] */ _imopVarPre189 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre189 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre189 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre191; /* [] */ _imopVarPre191 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre191 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre191 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre193; /* [] */ _imopVarPre193 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre193 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre193 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre195; /* [] */ _imopVarPre195 = &dt; /* [] */ fscanf(fp, "%lf", _imopVarPre195); /* [] */ /* [] */ int _imopVarPre197; /* 
[] */ _imopVarPre197 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre197 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre197 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre199; /* [] */ _imopVarPre199 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre199 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre199 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre201; /* [] */ _imopVarPre201 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre201 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre201 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre203; /* [] */ _imopVarPre203 = &omega; /* [] */ fscanf(fp, "%lf", _imopVarPre203); /* [] */ /* [] */ int _imopVarPre205; /* [] */ _imopVarPre205 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre205 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre205 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre207; /* [] */ _imopVarPre207 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre207 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre207 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre209; /* [] */ _imopVarPre209 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre209 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre209 = fgetc(fp); /* [] */ } /* [] */ double *_imopVarPre215; /* [] */ double *_imopVarPre216; /* [] */ double *_imopVarPre217; /* [] */ double *_imopVarPre218; /* [] */ double *_imopVarPre219; /* [] */ _imopVarPre215 = &tolrsd[4]; /* [] */ _imopVarPre216 = &tolrsd[3]; /* [] */ _imopVarPre217 = &tolrsd[2]; /* [] */ _imopVarPre218 = &tolrsd[1]; /* [] */ _imopVarPre219 = &tolrsd[0]; /* [] */ fscanf(fp, "%lf%lf%lf%lf%lf", _imopVarPre219, _imopVarPre218, _imopVarPre217, _imopVarPre216, _imopVarPre215); /* [] */ /* [] */ int _imopVarPre221; /* [] */ _imopVarPre221 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre221 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre221 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre223; /* [] */ _imopVarPre223 = fgetc(fp); /* 
[] */ /* [] */ /* [] */ while (_imopVarPre223 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre223 = fgetc(fp); /* [] */ } /* [] */ int _imopVarPre225; /* [] */ _imopVarPre225 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre225 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre225 = fgetc(fp); /* [] */ } /* [] */ int *_imopVarPre229; /* [] */ int *_imopVarPre230; /* [] */ int *_imopVarPre231; /* [] */ _imopVarPre229 = &nz0; /* [] */ _imopVarPre230 = &ny0; /* [] */ _imopVarPre231 = &nx0; /* [] */ fscanf(fp, "%d%d%d", _imopVarPre231, _imopVarPre230, _imopVarPre229); /* [] */ /* [] */ int _imopVarPre233; /* [] */ _imopVarPre233 = fgetc(fp); /* [] */ /* [] */ /* [] */ while (_imopVarPre233 != '\n') { /* [] */ /* [] */ ; /* [] */ _imopVarPre233 = fgetc(fp); /* [] */ } /* [] */ fclose(fp); /* [] */ } else { /* [] */ /* [] */ ipr = 1; /* [] */ inorm = 50; /* [] */ itmax = 50; /* [] */ dt = 0.5; /* [] */ omega = 1.2; /* [] */ tolrsd[0] = 1.0e-8; /* [] */ tolrsd[1] = 1.0e-8; /* [] */ tolrsd[2] = 1.0e-8; /* [] */ tolrsd[3] = 1.0e-8; /* [] */ tolrsd[4] = 1.0e-8; /* [] */ nx0 = 12; /* [] */ ny0 = 12; /* [] */ nz0 = 12; } /* [] */ int _imopVarPre234; /* [] */ int _imopVarPre235; /* [] */ _imopVarPre234 = nx0 < 4; /* [] */ /* [] */ if (!_imopVarPre234) { /* [] */ /* [] */ _imopVarPre235 = ny0 < 4; /* [] */ /* [] */ if (!_imopVarPre235) { /* [] */ /* [] */ _imopVarPre235 = nz0 < 4; } /* [] */ _imopVarPre234 = _imopVarPre235; } /* [] */ /* [] */ if (_imopVarPre234) { /* [] */ /* [] */ printf(" PROBLEM SIZE IS TOO SMALL - \n" " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ int _imopVarPre236; /* [] */ int _imopVarPre237; /* [] */ _imopVarPre236 = nx0 > 12; /* [] */ /* [] */ if (!_imopVarPre236) { /* [] */ /* [] */ _imopVarPre237 = ny0 > 12; /* [] */ /* [] */ if (!_imopVarPre237) { /* [] */ /* [] */ _imopVarPre237 = nz0 > 12; } /* [] */ _imopVarPre236 = _imopVarPre237; } /* [] */ /* [] */ if (_imopVarPre236) { /* [] */ 
/* [] */ printf(" PROBLEM SIZE IS TOO LARGE - \n" " NX, NY AND NZ SHOULD BE EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); /* [] */ /* [] */ exit(1); /* [] */ } /* [] */ printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0); /* [] */ /* [] */ printf(" Iterations: %3d\n", itmax); /* [] */ } /* [] */ static void rhs() { /* [] */ /* [] */ int i; /* [] */ int j; /* [] */ int k; /* [] */ int m; /* [] */ int L1; /* [] */ int L2; /* [] */ int ist1; /* [] */ int iend1; /* [] */ int jst1; /* [] */ int jend1; /* [] */ double q; /* [] */ double u21; /* [] */ double u31; /* [] */ double u41; /* [] */ double tmp; /* [] */ double u21i; /* [] */ double u31i; /* [] */ double u41i; /* [] */ double u51i; /* [] */ double u21j; /* [] */ double u31j; /* [] */ double u41j; /* [] */ double u51j; /* [] */ double u21k; /* [] */ double u31k; /* [] */ double u41k; /* [] */ double u51k; /* [] */ double u21im1; /* [] */ double u31im1; /* [] */ double u41im1; /* [] */ double u51im1; /* [] */ double u21jm1; /* [] */ double u31jm1; /* [] */ double u41jm1; /* [] */ double u51jm1; /* [] */ double u21km1; /* [] */ double u31km1; /* [] */ double u41km1; /* [] */ double u51km1; /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = 0; i <= nx - 1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = 0; j <= ny - 1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= nz - 1; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /* [] */ L1 = 0; /* [] */ L2 = nx - 1; /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = L1; i <= L2; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][1]; /* [] */ u21 = u[i][j][k][1] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * 
u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u21; /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u21; /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } /* [] */ L2 = nx - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= L2; i++) { /* [] */ /* [] */ tmp = 1.0 / u[i][j][k][0]; /* [] */ u21i = tmp * u[i][j][k][1]; /* [] */ u31i = tmp * u[i][j][k][2]; /* [] */ u41i = tmp * u[i][j][k][3]; /* [] */ u51i = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i - 1][j][k][0]; /* [] */ u21im1 = tmp * u[i - 1][j][k][1]; /* [] */ u31im1 = tmp * u[i - 1][j][k][2]; /* [] */ u41im1 = tmp * u[i - 1][j][k][3]; /* [] */ u51im1 = tmp * u[i - 1][j][k][4]; /* [] */ flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [] */ flux[i][j][k][2] = tx3 * (u31i - u31im1); /* [] */ flux[i][j][k][3] = tx3 * (u41i - u41im1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - 
flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); /* [] */ rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } /* [] */ ist1 = 3; /* [] */ iend1 = nx - 4; /* [] */ /* [] */ /* [] */ /* [] */ for (i = ist1; i <= iend1; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); /* [] */ rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /* [] */ L1 = 0; /* [] */ L2 = ny - 1; /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = L1; j <= L2; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 
1; k <= nz - 2; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][2]; /* [] */ u31 = u[i][j][k][2] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u31; /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u31; /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } /* [] */ L2 = ny - 1; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= L2; j++) { /* [] */ /* [] */ tmp = 1.0 / u[i][j][k][0]; /* [] */ u21j = tmp * u[i][j][k][1]; /* [] */ u31j = tmp * u[i][j][k][2]; /* [] */ u41j = tmp * u[i][j][k][3]; /* [] */ u51j = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i][j - 1][k][0]; /* [] */ u21jm1 = tmp * u[i][j - 1][k][1]; /* [] */ u31jm1 = tmp * u[i][j - 1][k][2]; /* [] */ u41jm1 = tmp * u[i][j - 1][k][3]; /* [] */ u51jm1 = tmp * u[i][j - 1][k][4]; /* [] */ flux[i][j][k][1] = ty3 * (u21j - u21jm1); /* [] */ flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [] */ flux[i][j][k][3] = ty3 * (u41j - u41jm1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ rsd[i][j][k][0] = 
rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); /* [] */ rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } /* [] */ jst1 = 3; /* [] */ jend1 = ny - 4; /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst1; j <= jend1; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); /* [] */ rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /* [] */ #pragma omp for nowait /* [] */ /* [] */ /* [] */ for (i = 
ist; i <= iend; i++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (j = jst; j <= jend; j++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (k = 0; k <= nz - 1; k++) { /* [] */ /* [] */ flux[i][j][k][0] = u[i][j][k][3]; /* [] */ u41 = u[i][j][k][3] / u[i][j][k][0]; /* [] */ q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; /* [] */ flux[i][j][k][1] = u[i][j][k][1] * u41; /* [] */ flux[i][j][k][2] = u[i][j][k][2] * u41; /* [] */ flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); /* [] */ flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 1; k++) { /* [] */ /* [] */ tmp = 1.0 / u[i][j][k][0]; /* [] */ u21k = tmp * u[i][j][k][1]; /* [] */ u31k = tmp * u[i][j][k][2]; /* [] */ u41k = tmp * u[i][j][k][3]; /* [] */ u51k = tmp * u[i][j][k][4]; /* [] */ tmp = 1.0 / u[i][j][k - 1][0]; /* [] */ u21km1 = tmp * u[i][j][k - 1][1]; /* [] */ u31km1 = tmp * u[i][j][k - 1][2]; /* [] */ u41km1 = tmp * u[i][j][k - 1][3]; /* [] */ u51km1 = tmp * u[i][j][k - 1][4]; /* [] */ flux[i][j][k][1] = tz3 * (u21k - u21km1); /* [] */ flux[i][j][k][2] = tz3 * (u31k - u31km1); /* [] */ flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [] */ flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 1; k <= nz - 2; k++) { /* [] */ /* [] */ rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] 
- 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); /* [] */ rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); /* [] */ rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); /* [] */ rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); /* [] */ rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); /* [] */ rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } /* [] */ /* [] */ /* [] */ /* [] */ for (k = 3; k <= nz - 4; k++) { /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); /* [] */ rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } /* [] */ static void setbv() { /* [] */ /* [49] */ #pragma omp parallel { /* [49] */ /* [49] */ int i; /* [49] */ int j; /* [49] */ int k; /* [49] */ int iglob; 
/* [49] */ int jglob; /* [49] */ /* face 1+2: set u on the bottom (k = 0) and top (k = nz-1) boundary planes; exact() presumably writes the 5 exact-solution components at the given global point -- confirm against its definition */ #pragma omp for nowait /* [49] */ /* [49] */ /* [49] */ for (i = 0; i < nx; i++) { /* [49] */ /* [49] */ iglob = i; /* [49] */ /* [49] */ /* [49] */ /* [49] */ for (j = 0; j < ny; j++) { /* [49] */ /* [49] */ jglob = j; /* [49] */ double *_imopVarPre239; /* [49] */ _imopVarPre239 = &u[i][j][0][0]; /* [49] */ exact(iglob, jglob, 0, _imopVarPre239); /* [49] */ /* [49] */ double *_imopVarPre242; /* [49] */ int _imopVarPre243; /* [49] */ _imopVarPre242 = &u[i][j][nz - 1][0]; /* [49] */ _imopVarPre243 = nz - 1; /* [49] */ exact(iglob, jglob, _imopVarPre243, _imopVarPre242); /* [49] */ } } /* [49] */ // #pragma omp dummyFlush BARRIER_START /* [49] */ #pragma omp barrier /* [50] */ /* face 3: j = 0 (south) boundary plane */ #pragma omp for nowait /* [50] */ /* [50] */ /* [50] */ for (i = 0; i < nx; i++) { /* [50] */ /* [50] */ iglob = i; /* [50] */ /* [50] */ /* [50] */ /* [50] */ for (k = 0; k < nz; k++) { /* [50] */ /* [50] */ double *_imopVarPre245; /* [50] */ _imopVarPre245 = &u[i][0][k][0]; /* [50] */ exact(iglob, 0, k, _imopVarPre245); /* [50] */ } } /* [50] */ // #pragma omp dummyFlush BARRIER_START /* [50] */ #pragma omp barrier /* [51] */ /* face 4: j = ny-1 (north) boundary plane; the global j index passed to exact() is ny0 - 1 */ #pragma omp for nowait /* [51] */ /* [51] */ /* [51] */ for (i = 0; i < nx; i++) { /* [51] */ /* [51] */ iglob = i; /* [51] */ /* [51] */ /* [51] */ /* [51] */ for (k = 0; k < nz; k++) { /* [51] */ /* [51] */ double *_imopVarPre248; /* [51] */ int _imopVarPre249; /* [51] */ _imopVarPre248 = &u[i][ny - 1][k][0]; /* [51] */ _imopVarPre249 = ny0 - 1; /* [51] */ exact(iglob, _imopVarPre249, k, _imopVarPre248); /* [51] */ } } /* [51] */ // #pragma omp dummyFlush BARRIER_START /* [51] */ #pragma omp barrier /* [52] */ /* face 5: i = 0 (west) boundary plane */ #pragma omp for nowait /* [52] */ /* [52] */ /* [52] */ for (j = 0; j < ny; j++) { /* [52] */ /* [52] */ jglob = j; /* [52] */ /* [52] */ /* [52] */ /* [52] */ for (k = 0; k < nz; k++) { /* [52] */ /* [52] */ double *_imopVarPre251; /* [52] */ _imopVarPre251 = &u[0][j][k][0]; /* [52] */ exact(0, jglob, k, _imopVarPre251); /* [52] 
*/
        }
    }
    // #pragma omp dummyFlush BARRIER_START
#pragma omp barrier
    /* Tail of the boundary-value routine: set the face i = nx-1 of u
       from the exact solution evaluated at the global index nx0-1. */
#pragma omp for nowait
    for (j = 0; j < ny; j++) {
        jglob = j;
        for (k = 0; k < nz; k++) {
            double *_imopVarPre254;
            int _imopVarPre255;
            _imopVarPre254 = &u[nx - 1][j][k][0];
            _imopVarPre255 = nx0 - 1;
            exact(_imopVarPre255, jglob, k, _imopVarPre254);
        }
    }
}
}
/*
 * setcoeff: initialize the grid spacings (dxi/deta/dzeta and the derived
 * tx/ty/tz factors), the per-direction dissipation constants dx*/ /*dy*/ /*dz*,
 * the dissipation coefficient dssp, and the coefficient table ce[][].
 * All targets are file-scope globals; nx0/ny0/nz0 are the global grid sizes.
 */
static void setcoeff() {
    /* grid spacings and their inverse-square / inverse / half-inverse forms */
    dxi = 1.0 / (nx0 - 1);
    deta = 1.0 / (ny0 - 1);
    dzeta = 1.0 / (nz0 - 1);
    tx1 = 1.0 / (dxi * dxi);
    tx2 = 1.0 / (2.0 * dxi);
    tx3 = 1.0 / dxi;
    ty1 = 1.0 / (deta * deta);
    ty2 = 1.0 / (2.0 * deta);
    ty3 = 1.0 / deta;
    tz1 = 1.0 / (dzeta * dzeta);
    tz2 = 1.0 / (2.0 * dzeta);
    tz3 = 1.0 / dzeta;
    /* index range constants (not used in this excerpt) */
    ii1 = 1;
    ii2 = nx0 - 2;
    ji1 = 1;
    ji2 = ny0 - 3;
    ki1 = 2;
    ki2 = nz0 - 2;
    /* per-direction dissipation constants */
    dx1 = 0.75;
    dx2 = dx1;
    dx3 = dx1;
    dx4 = dx1;
    dx5 = dx1;
    dy1 = 0.75;
    dy2 = dy1;
    dy3 = dy1;
    dy4 = dy1;
    dy5 = dy1;
    dz1 = 1.00;
    dz2 = dz1;
    dz3 = dz1;
    dz4 = dz1;
    dz5 = dz1;
    int _imopVarPre348;
    double _imopVarPre349;
    int _imopVarPre350;
    double _imopVarPre351;
    int _imopVarPre358;
    double _imopVarPre359;
    /* dssp = max(dx1, dy1, dz1) / 4.0 — the nested ifs below are a
       flattened (instrumented) form of the two-level max. */
    _imopVarPre348 = (dy1 > dz1);
    if (_imopVarPre348) {
        _imopVarPre349 = dy1;
    } else {
        _imopVarPre349 = dz1;
    }
    _imopVarPre350 = (dx1 > _imopVarPre349);
    if (_imopVarPre350) {
        _imopVarPre351 = dx1;
    } else {
        _imopVarPre358 = (dy1 > dz1);
        if (_imopVarPre358) {
            _imopVarPre359 = dy1;
        } else {
            _imopVarPre359 = dz1;
        }
        _imopVarPre351 = _imopVarPre359;
    }
    dssp = _imopVarPre351 / 4.0;
    /* NOTE(review): ce[][] appears to hold the exact-solution polynomial
       coefficients consumed by exact(); confirm against exact()'s body. */
    ce[0][0] = 2.0;
    ce[0][1] = 0.0;
    ce[0][2] = 0.0;
    ce[0][3] = 4.0;
    ce[0][4] = 5.0;
    ce[0][5] = 3.0;
    ce[0][6] = 5.0e-01;
    ce[0][7] = 2.0e-02;
    ce[0][8] = 1.0e-02;
    ce[0][9] = 3.0e-02;
    ce[0][10] = 5.0e-01;
    ce[0][11] = 4.0e-01;
    ce[0][12] = 3.0e-01;
    ce[1][0] = 1.0;
    ce[1][1] = 0.0;
    ce[1][2] = 0.0;
    ce[1][3] = 0.0;
    ce[1][4] = 1.0;
    ce[1][5] = 2.0;
    ce[1][6] = 3.0;
    ce[1][7] = 1.0e-02;
    ce[1][8] = 3.0e-02;
    ce[1][9] = 2.0e-02;
    ce[1][10] = 4.0e-01;
    ce[1][11] = 3.0e-01;
    ce[1][12] = 5.0e-01;
    ce[2][0] = 2.0;
    ce[2][1] = 2.0;
    ce[2][2] = 0.0;
    ce[2][3] = 0.0;
    ce[2][4] = 0.0;
    ce[2][5] = 2.0;
    ce[2][6] = 3.0;
    ce[2][7] = 4.0e-02;
    ce[2][8] = 3.0e-02;
    ce[2][9] = 5.0e-02;
    ce[2][10] = 3.0e-01;
    ce[2][11] = 5.0e-01;
    ce[2][12] = 4.0e-01;
    ce[3][0] = 2.0;
    ce[3][1] = 2.0;
    ce[3][2] = 0.0;
    ce[3][3] = 0.0;
    ce[3][4] = 0.0;
    ce[3][5] = 2.0;
    ce[3][6] = 3.0;
    ce[3][7] = 3.0e-02;
    ce[3][8] = 5.0e-02;
    ce[3][9] = 4.0e-02;
    ce[3][10] = 2.0e-01;
    ce[3][11] = 1.0e-01;
    ce[3][12] = 3.0e-01;
    ce[4][0] = 5.0;
    ce[4][1] = 4.0;
    ce[4][2] = 3.0;
    ce[4][3] = 2.0;
    ce[4][4] = 1.0e-01;
    ce[4][5] = 4.0e-01;
    ce[4][6] = 3.0e-01;
    ce[4][7] = 5.0e-02;
    ce[4][8] = 4.0e-02;
    ce[4][9] = 3.0e-02;
    ce[4][10] = 1.0e-01;
    ce[4][11] = 3.0e-01;
    ce[4][12] = 2.0e-01;
}
/*
 * setiv: set initial values of u at interior points (i, j strictly inside
 * the global boundary, 1 <= k <= nz-2) by blending the exact solution on
 * the six boundary faces: u = pxi + peta + pzeta - pairwise products +
 * triple product (transfinite-style interpolation, per the formula below).
 */
static void setiv() {
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        int iglob;
        int jglob;
        double xi;
        double eta;
        double zeta;
        double pxi;
        double peta;
        double pzeta;
        double ue_1jk[5];    /* exact solution on face i = 0 */
        double ue_nx0jk[5];  /* exact solution on face i = nx0-1 */
        double ue_i1k[5];    /* exact solution on face j = 0 */
        double ue_iny0k[5];  /* exact solution on face j = ny0-1 */
        double ue_ij1[5];    /* exact solution on face k = 0 */
        double ue_ijnz[5];   /* exact solution on face k = nz-1 */
#pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 1; k < nz - 1; k++) {
                zeta = ((double)k) / (nz - 1);
                int _imopVarPre361;
                /* interior in j: jglob != 0 && jglob != ny0-1
                   (short-circuit AND flattened by the instrumenter) */
                _imopVarPre361 = jglob != 0;
                if (_imopVarPre361) {
                    _imopVarPre361 = jglob != ny0 - 1;
                }
                if (_imopVarPre361) {
                    eta = ((double)jglob) / (ny0 - 1);
                    for (i = 0; i < nx; i++) {
                        iglob = i;
                        int _imopVarPre363;
                        /* interior in i: iglob != 0 && iglob != nx0-1 */
                        _imopVarPre363 = iglob != 0;
                        if (_imopVarPre363) {
                            _imopVarPre363 = iglob != nx0 - 1;
                        }
                        if (_imopVarPre363) {
                            xi = ((double)iglob) / (nx0 - 1);
                            /* sample the exact solution on all six faces */
                            exact(0, jglob, k, ue_1jk);
                            int _imopVarPre365;
                            _imopVarPre365 = nx0 - 1;
                            exact(_imopVarPre365, jglob, k, ue_nx0jk);
                            exact(iglob, 0, k, ue_i1k);
                            int _imopVarPre367;
                            _imopVarPre367 = ny0 - 1;
                            exact(iglob, _imopVarPre367, k, ue_iny0k);
                            exact(iglob, jglob, 0, ue_ij1);
                            int _imopVarPre369;
                            _imopVarPre369 = nz - 1;
                            exact(iglob, jglob, _imopVarPre369, ue_ijnz);
                            for (m = 0; m < 5; m++) {
                                /* linear blend along each axis, then the
                                   inclusion-exclusion combination */
                                pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
                                peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
                                pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
                                u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
                            }
                        }
                    }
                }
            }
        }
    }
}
/*
 * ssor: successive over-relaxation driver (definition continues beyond
 * this excerpt).
 */
static void ssor() {
    int i;
    int j;
    int k;
    int m;
    int istep;
    double tmp;
    double delunm[5];
    double tv[12][12][5];
    tmp = 1.0 / (omega * (2.0 - omega));
    /* zero the working block arrays a, b, c, d */
#pragma omp parallel private(i, j, k, m)
    {
#pragma omp for nowait
        for (i = 0; i < 12; i++) {
            for (j = 0; j < 12; j++) {
                for (k = 0; k < 5; k++) {
                    for (m = 0; m < 5; m++) {
                        a[i][j][k][m] = 0.0;
                        b[i][j][k][m] = 0.0;
                        c[i][j][k][m] = 0.0;
                        d[i][j][k][m] = 0.0;
                    }
                }
            }
        }
    }
    /* residual computation region (continues beyond this excerpt) */
#pragma omp parallel
    {
        int i_imopVarPre84;
        int j_imopVarPre85;
        int k_imopVarPre86;
        int m_imopVarPre87;
        int L1;
        int L2;
        int ist1;
        int iend1;
        int jst1;
        int jend1;
        double q;
        double u21;
        double u31;
        double u41;
        double tmp_imopVarPre88;
        double u21i;
        double u31i;
        double u41i;
        double u51i;
        double u21j;
        double u31j;
        double u41j;
        double u51j;
        double u21k;
        double u31k;
        double u41k;
        double u51k;
        double u21im1;
        double u31im1;
        double u41im1;
        double u51im1;
        double u21jm1;
        double u31jm1;
        double
u41jm1; /* [56] */ double u51jm1; /* [56] */ double u21km1; /* [56] */ double u31km1; /* [56] */ double u41km1; /* [56] */ double u51km1; /* [56] */ #pragma omp for nowait /* [56] */ /* [56] */ /* [56] */ for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [56] */ /* [56] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]; } } } } /* [56] */ L1 = 0; /* [56] */ L2 = nx - 1; /* [56] */ #pragma omp for nowait /* [56] */ /* [56] */ /* [56] */ for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [56] */ /* [56] */ /* [56] */ /* [56] */ /* [56] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [56] */ /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [56] */ u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [56] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21; /* [56] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21; } } } /* [56] */ // #pragma omp dummyFlush BARRIER_START /* [56] */ #pragma omp barrier /* [57] */ #pragma omp for nowait /* [57] */ /* [57] */ /* [57] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [57] */ L2 = nx - 1; /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) { /* [57] */ /* [57] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [57] */ u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [57] */ u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [57] */ u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [57] */ u51i = 
tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [57] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0]; /* [57] */ u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1]; /* [57] */ u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2]; /* [57] */ u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3]; /* [57] */ u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4]; /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1); /* [57] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]); /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]); } /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [57] */ 
rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } /* [57] */ ist1 = 3; /* [57] */ iend1 = nx - 4; /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) { /* [57] */ /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } /* [57] */ /* [57] */ /* [57] */ /* [57] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [57] */ /* [57] */ rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); /* [57] */ rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 
2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]); } } } /* [57] */ // #pragma omp dummyFlush BARRIER_START /* [57] */ #pragma omp barrier /* [58] */ L1 = 0; /* [58] */ L2 = ny - 1; /* [58] */ #pragma omp for nowait /* [58] */ /* [58] */ /* [58] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [58] */ /* [58] */ /* [58] */ /* [58] */ /* [58] */ for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [58] */ /* [58] */ /* [58] */ /* [58] */ /* [58] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [58] */ /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [58] */ u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [58] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31; /* [58] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31; } } } /* [58] */ // #pragma omp dummyFlush BARRIER_START /* [58] */ #pragma omp barrier /* [59] */ #pragma omp for nowait /* [59] */ /* [59] */ 
/* [59] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]); } } /* [59] */ L2 = ny - 1; /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) { /* [59] */ /* [59] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [59] */ u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [59] */ u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [59] */ u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [59] */ u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [59] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0]; /* [59] */ u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1]; /* [59] */ u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2]; /* [59] */ u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3]; /* [59] */ u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4]; /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1); /* [59] */ 
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1); /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1); /* [59] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]); /* [59] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]); } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]); /* [59] */ rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]); } /* [59] */ jst1 = 3; /* [59] */ jend1 = ny - 4; /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) { /* [59] */ /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ 
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]); } } /* [59] */ /* [59] */ /* [59] */ /* [59] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [59] */ /* [59] */ rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); /* [59] */ rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]); } } } /* [59] */ // #pragma omp dummyFlush BARRIER_START /* [59] */ #pragma omp barrier /* [60] */ #pragma omp for nowait /* [60] */ /* [60] */ /* [60] */ for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [60] */ /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* 
[60] */ u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41; } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]); } } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) { /* [60] */ /* [60] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0]; /* [60] */ u21k = tmp_imopVarPre88 * 
u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]; /* [60] */ u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]; /* [60] */ u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]; /* [60] */ u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]; /* [60] */ tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0]; /* [60] */ u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1]; /* [60] */ u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2]; /* [60] */ u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3]; /* [60] */ u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4]; /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [60] */ flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + 
tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * 
(+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]); } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) { /* [60] */ /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]); } } /* [60] */ /* [60] */ /* [60] */ /* [60] */ for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) { /* [60] */ /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); /* [60] */ rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * 
(u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]); } } } } /* [61] */ #pragma omp parallel { /* [61] */ /* [61] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [61] */ double *sum; /* [61] */ v = rsd; /* [61] */ sum = rsdnm; /* [61] */ int i_imopVarPre75; /* [61] */ int j_imopVarPre76; /* [61] */ int k_imopVarPre77; /* [61] */ int m_imopVarPre78; /* [61] */ double sum0 = 0.0; /* [61] */ double sum1 = 0.0; /* [61] */ double sum2 = 0.0; /* [61] */ double sum3 = 0.0; /* [61] */ double sum4 = 0.0; /* [61] */ #pragma omp single nowait { /* [61] */ /* [61] */ /* [61] */ /* [61] */ /* [61] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [61] */ /* [61] */ sum[m_imopVarPre78] = 0.0; } } /* [61] */ // #pragma omp dummyFlush BARRIER_START /* [61] */ #pragma omp barrier /* [62] */ #pragma omp for nowait /* [62] */ /* [62] */ /* [62] */ for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) { /* [62] */ /* [62] */ /* [62] */ /* [62] */ /* [62] */ for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) { /* [62] */ /* [62] */ /* [62] */ /* [62] */ /* [62] */ for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) { /* [62] */ /* [62] */ sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0]; /* [62] */ sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1]; /* [62] */ sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2]; /* [62] */ sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3]; /* [62] */ sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * 
v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4]; } } } /* [62] */ // #pragma omp dummyFlush CRITICAL_START /* [62] */ #pragma omp critical { /* [62] */ /* [62] */ sum[0] += sum0; /* [62] */ sum[1] += sum1; /* [62] */ sum[2] += sum2; /* [62] */ sum[3] += sum3; /* [62] */ sum[4] += sum4; } /* [62] */ // #pragma omp dummyFlush CRITICAL_END /* [62] */ // #pragma omp dummyFlush BARRIER_START /* [62] */ #pragma omp barrier /* [63] */ #pragma omp single nowait { /* [63] */ /* [63] */ /* [63] */ /* [63] */ /* [63] */ for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) { /* [63] */ /* [63] */ double _imopVarPre154; /* [63] */ double _imopVarPre155; /* [63] */ _imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [63] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [63] */ /* [63] */ sum[m_imopVarPre78] = _imopVarPre155; } } } /* [] */ timer_clear(1); /* [] */ /* [] */ timer_start(1); /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ for (istep = 1; istep <= itmax; istep++) { /* [] */ /* [] */ int _imopVarPre372; /* [] */ int _imopVarPre370; /* [] */ int _imopVarPre371; /* [] */ _imopVarPre370 = istep % 20 == 0; /* [] */ /* [] */ if (!_imopVarPre370) { /* [] */ /* [] */ _imopVarPre371 = istep == itmax; /* [] */ /* [] */ if (!_imopVarPre371) { /* [] */ /* [] */ _imopVarPre371 = istep == 1; } /* [] */ _imopVarPre370 = _imopVarPre371; } /* [] */ /* [] */ if (_imopVarPre370) { /* [] */ /* [] */ #pragma omp master { /* [] */ /* [] */ printf(" Time step %4d\n", istep); /* [] */ } } /* [64] */ #pragma omp parallel private(istep, i, j, k, m) { /* [64] */ /* [64] */ int _imopVarPre377; /* [64] */ int _imopVarPre378; /* [64] */ int _imopVarPre379; /* [64] */ int _imopVarPre380; /* [64] */ #pragma omp for nowait /* [64] */ /* [64] */ /* [64] */ for (i = ist; i <= iend; i++) { /* [64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (j = jst; j <= jend; j++) { /* [64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (k = 1; k <= nz - 2; k++) { /* 
[64] */ /* [64] */ /* [64] */ /* [64] */ /* [64] */ for (m = 0; m < 5; m++) { /* [64] */ /* [64] */ rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } /* [64] */ // #pragma omp dummyFlush BARRIER_START /* [64] */ #pragma omp barrier /* [41] */ /* [41] */ /* [41] */ /* [41] */ for (k = 1; k <= nz - 2; k++) { /* [41] */ /* [41] */ jacld(k); /* [41] */ /* [41] */ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); /* [41] */ } /* [41] */ // #pragma omp dummyFlush BARRIER_START /* [41] */ #pragma omp barrier /* [42] */ /* [42] */ /* [42] */ /* [42] */ for (k = nz - 2; k >= 1; k--) { /* [42] */ /* [42] */ jacu(k); /* [42] */ /* [42] */ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); /* [42] */ } /* [42] */ // #pragma omp dummyFlush BARRIER_START /* [42] */ #pragma omp barrier /* [65] */ #pragma omp for nowait /* [65] */ /* [65] */ /* [65] */ for (i = ist; i <= iend; i++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (j = jst; j <= jend; j++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (k = 1; k <= nz - 2; k++) { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (m = 0; m < 5; m++) { /* [65] */ /* [65] */ u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* [65] */ /* [65] */ if (istep % inorm == 0) { /* [65] */ /* [65] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [65] */ double *sum; /* [65] */ v = rsd; /* [65] */ sum = delunm; /* [65] */ int i_imopVarPre89; /* [65] */ int j_imopVarPre90; /* [65] */ int k_imopVarPre91; /* [65] */ int m_imopVarPre92; /* [65] */ double sum0 = 0.0; /* [65] */ double sum1 = 0.0; /* [65] */ double sum2 = 0.0; /* [65] */ double sum3 = 0.0; /* [65] */ double sum4 = 0.0; /* [65] */ #pragma omp single nowait { /* [65] */ /* [65] */ /* [65] */ /* [65] */ /* [65] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [65] */ /* [65] */ sum[m_imopVarPre92] = 0.0; } } /* [65] */ // #pragma omp dummyFlush BARRIER_START 
/* [65] */ #pragma omp barrier /* [66] */ #pragma omp for nowait /* [66] */ /* [66] */ /* [66] */ for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) { /* [66] */ /* [66] */ /* [66] */ /* [66] */ /* [66] */ for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) { /* [66] */ /* [66] */ /* [66] */ /* [66] */ /* [66] */ for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) { /* [66] */ /* [66] */ sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0]; /* [66] */ sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1]; /* [66] */ sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2]; /* [66] */ sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3]; /* [66] */ sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4]; } } } /* [66] */ // #pragma omp dummyFlush CRITICAL_START /* [66] */ #pragma omp critical { /* [66] */ /* [66] */ sum[0] += sum0; /* [66] */ sum[1] += sum1; /* [66] */ sum[2] += sum2; /* [66] */ sum[3] += sum3; /* [66] */ sum[4] += sum4; } /* [66] */ // #pragma omp dummyFlush CRITICAL_END /* [66] */ // #pragma omp dummyFlush BARRIER_START /* [66] */ #pragma omp barrier /* [67] */ #pragma omp single nowait { /* [67] */ /* [67] */ /* [67] */ /* [67] */ /* [67] */ for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) { /* [67] */ /* [67] */ double _imopVarPre154; /* [67] */ double _imopVarPre155; /* [67] */ _imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [67] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [67] */ /* [67] */ sum[m_imopVarPre92] = _imopVarPre155; } } /* [67] */ // #pragma omp dummyFlush BARRIER_START /* [67] */ #pragma omp barrier /* 
[68] */ // #pragma omp dummyFlush BARRIER_START /* [68] */ #pragma omp barrier } /* [65, 69] */ // #pragma omp dummyFlush BARRIER_START /* [65, 69] */ #pragma omp barrier /* [66, 70] */ int i_imopVarPre79; /* [66, 70] */ int j_imopVarPre80; /* [66, 70] */ int k_imopVarPre81; /* [66, 70] */ int m_imopVarPre82; /* [66, 70] */ int L1; /* [66, 70] */ int L2; /* [66, 70] */ int ist1; /* [66, 70] */ int iend1; /* [66, 70] */ int jst1; /* [66, 70] */ int jend1; /* [66, 70] */ double q; /* [66, 70] */ double u21; /* [66, 70] */ double u31; /* [66, 70] */ double u41; /* [66, 70] */ double tmp_imopVarPre83; /* [66, 70] */ double u21i; /* [66, 70] */ double u31i; /* [66, 70] */ double u41i; /* [66, 70] */ double u51i; /* [66, 70] */ double u21j; /* [66, 70] */ double u31j; /* [66, 70] */ double u41j; /* [66, 70] */ double u51j; /* [66, 70] */ double u21k; /* [66, 70] */ double u31k; /* [66, 70] */ double u41k; /* [66, 70] */ double u51k; /* [66, 70] */ double u21im1; /* [66, 70] */ double u31im1; /* [66, 70] */ double u41im1; /* [66, 70] */ double u51im1; /* [66, 70] */ double u21jm1; /* [66, 70] */ double u31jm1; /* [66, 70] */ double u41jm1; /* [66, 70] */ double u51jm1; /* [66, 70] */ double u21km1; /* [66, 70] */ double u31km1; /* [66, 70] */ double u41km1; /* [66, 70] */ double u51km1; /* [66, 70] */ #pragma omp for nowait /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ /* [66, 70] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [66, 70] */ /* [66, 70] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = 
-frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]; } } } } /* [66, 70] */ // #pragma omp dummyFlush BARRIER_START /* [66, 70] */ #pragma omp barrier /* [67, 71] */ L1 = 0; /* [67, 71] */ L2 = nx - 1; /* [67, 71] */ #pragma omp for nowait /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ /* [67, 71] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [67, 71] */ /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [67, 71] */ u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [67, 71] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21; /* [67, 71] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21; } } } /* [67, 71] */ 
// #pragma omp dummyFlush BARRIER_START /* [67, 71] */ #pragma omp barrier /* [68, 72] */ #pragma omp for nowait /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [68, 72] */ L2 = nx - 1; /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [68, 72] */ u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [68, 72] */ u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [68, 72] */ u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [68, 72] */ u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [68, 72] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0]; /* [68, 72] */ u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1]; /* [68, 72] */ u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2]; /* [68, 72] */ u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 
1][j_imopVarPre80][k_imopVarPre81][3]; /* [68, 72] */ u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4]; /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1); /* [68, 72] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * 
tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]); /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]); } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [68, 72] */ rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } /* [68, 72] */ ist1 = 3; /* [68, 72] 
*/ iend1 = nx - 4; /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) { /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ /* [68, 72] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [68, 72] */ /* [68, 72] */ rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); /* [68, 72] */ rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]); } } } /* [68, 72] */ // #pragma omp dummyFlush BARRIER_START /* [68, 72] */ #pragma omp barrier /* [69, 73] */ L1 = 0; /* [69, 73] */ L2 = ny - 1; /* [69, 73] */ #pragma omp for nowait /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (i_imopVarPre79 = ist; 
i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ /* [69, 73] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [69, 73] */ /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [69, 73] */ u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [69, 73] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31; /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31; /* [69, 73] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31; } } } /* [69, 73] */ // #pragma omp dummyFlush BARRIER_START /* [69, 73] */ #pragma omp barrier /* [70, 74] */ #pragma omp for nowait /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (k_imopVarPre81 = 1; 
k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]); } } /* [70, 74] */ L2 = ny - 1; /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [70, 74] */ u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [70, 74] */ u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [70, 74] */ u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [70, 74] */ u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [70, 74] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0]; /* [70, 74] */ u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1]; /* [70, 74] */ u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2]; /* [70, 74] */ u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3]; /* [70, 74] */ u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4]; /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * 
(u31j - u31jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1); /* [70, 74] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 
1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]); /* [70, 74] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]); } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]); /* [70, 74] */ rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]); } /* [70, 74] */ jst1 = 3; /* [70, 74] */ jend1 = ny - 4; /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) { /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] 
*/ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]); } } /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ /* [70, 74] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [70, 74] */ /* [70, 74] */ rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); /* [70, 74] */ rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]); } } } /* [70, 74] */ // #pragma omp dummyFlush BARRIER_START /* [70, 74] */ #pragma omp barrier /* [71, 75] */ #pragma omp for nowait /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ 
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [71, 75] */ u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41; } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]); } } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 
1; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0]; /* [71, 75] */ u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]; /* [71, 75] */ u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]; /* [71, 75] */ u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]; /* [71, 75] */ u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]; /* [71, 75] */ tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0]; /* [71, 75] */ u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1]; /* [71, 75] */ u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2]; /* [71, 75] */ u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3]; /* [71, 75] */ u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4]; /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1); /* [71, 75] */ flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 
2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 
1][4]); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]); } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) { /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]); } } /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ /* [71, 75] */ for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) { /* [71, 75] */ /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 
5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); /* [71, 75] */ rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]); } } } /* [71, 75] */ // #pragma omp dummyFlush BARRIER_START /* [71, 75] */ #pragma omp barrier /* [72, 76] */ // #pragma omp dummyFlush BARRIER_START /* [72, 76] */ #pragma omp barrier /* [73, 77] */ #pragma omp master { /* [73, 77] */ /* [73, 77] */ _imopVarPre372 = (istep % inorm == 0); /* [73, 77] */ /* [73, 77] */ if (!_imopVarPre372) { /* [73, 77] */ /* [73, 77] */ _imopVarPre372 = (istep == itmax); } } /* [73, 77] */ // #pragma omp dummyFlush BARRIER_START /* [73, 77] */ #pragma omp barrier /* [74] */ /* [74] */ if (_imopVarPre372) { /* [74] */ /* [74] */ double (*v)[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5]; /* [74] */ double *sum; /* [74] */ v = rsd; /* [74] */ sum = rsdnm; /* [74] */ int i_imopVarPre93; /* [74] */ int j_imopVarPre94; /* [74] */ int k_imopVarPre95; /* [74] */ int m_imopVarPre96; /* [74] */ double sum0 = 0.0; /* [74] */ double sum1 = 0.0; /* [74] */ double sum2 = 0.0; /* [74] */ double sum3 = 0.0; /* [74] */ double sum4 = 0.0; /* [74] */ #pragma omp single nowait { /* [74] */ /* [74] */ /* [74] */ /* [74] */ /* [74] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [74] */ /* [74] */ sum[m_imopVarPre96] = 0.0; } } /* [74] */ // #pragma omp dummyFlush BARRIER_START /* [74] */ #pragma omp barrier /* [75] */ #pragma omp for nowait /* [75] */ /* [75] */ /* [75] */ for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) { /* [75] */ /* [75] */ /* [75] */ /* [75] */ /* 
[75] */ for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) { /* [75] */ /* [75] */ /* [75] */ /* [75] */ /* [75] */ for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) { /* [75] */ /* [75] */ sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0]; /* [75] */ sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1]; /* [75] */ sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2]; /* [75] */ sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3]; /* [75] */ sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4]; } } } /* [75] */ // #pragma omp dummyFlush CRITICAL_START /* [75] */ #pragma omp critical { /* [75] */ /* [75] */ sum[0] += sum0; /* [75] */ sum[1] += sum1; /* [75] */ sum[2] += sum2; /* [75] */ sum[3] += sum3; /* [75] */ sum[4] += sum4; } /* [75] */ // #pragma omp dummyFlush CRITICAL_END /* [75] */ // #pragma omp dummyFlush BARRIER_START /* [75] */ #pragma omp barrier /* [76] */ #pragma omp single nowait { /* [76] */ /* [76] */ /* [76] */ /* [76] */ /* [76] */ for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) { /* [76] */ /* [76] */ double _imopVarPre154; /* [76] */ double _imopVarPre155; /* [76] */ _imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)); /* [76] */ _imopVarPre155 = sqrt(_imopVarPre154); /* [76] */ /* [76] */ sum[m_imopVarPre96] = _imopVarPre155; } } } /* [74, 76] */ // #pragma omp dummyFlush BARRIER_START /* [74, 76] */ #pragma omp barrier /* [75, 77] */ #pragma omp master { /* [75, 77] */ /* [75, 77] */ _imopVarPre377 = (rsdnm[0] < tolrsd[0]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre377) { /* [75, 77] */ /* [75, 77] */ _imopVarPre378 = 
(rsdnm[1] < tolrsd[1]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre378) { /* [75, 77] */ /* [75, 77] */ _imopVarPre379 = (rsdnm[2] < tolrsd[2]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre379) { /* [75, 77] */ /* [75, 77] */ _imopVarPre380 = (rsdnm[3] < tolrsd[3]); /* [75, 77] */ /* [75, 77] */ if (_imopVarPre380) { /* [75, 77] */ /* [75, 77] */ _imopVarPre380 = (rsdnm[4] < tolrsd[4]); } /* [75, 77] */ _imopVarPre379 = _imopVarPre380; } /* [75, 77] */ _imopVarPre378 = _imopVarPre379; } /* [75, 77] */ _imopVarPre377 = _imopVarPre378; } /* [75, 77] */ /* [75, 77] */ if (_imopVarPre377) { /* [75, 77] */ /* [75, 77] */ exit(1); /* [75, 77] */ } } } } /* [] */ timer_stop(1); /* [] */ /* [] */ maxtime = timer_read(1); /* [] */ } /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ /* [] */ static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified) { /* [] */ /* [] */ double xcrref[5]; /* [] */ double xceref[5]; /* [] */ double xciref; /* [] */ double xcrdif[5]; /* [] */ double xcedif[5]; /* [] */ double xcidif; /* [] */ double epsilon; /* [] */ double dtref; /* [] */ int m; /* [] */ epsilon = 1.0e-08; /* [] */ *class = 'U'; /* [] */ *verified = 1; /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ xcrref[m] = 1.0; /* [] */ xceref[m] = 1.0; } /* [] */ xciref = 1.0; /* [] */ int _imopVarPre384; /* [] */ int _imopVarPre385; /* [] */ int _imopVarPre386; /* [] */ _imopVarPre384 = nx0 == 12; /* [] */ /* [] */ if (_imopVarPre384) { /* [] */ /* [] */ _imopVarPre385 = ny0 == 12; /* [] */ /* [] */ if (_imopVarPre385) { /* [] */ /* [] */ _imopVarPre386 = nz0 == 12; /* [] */ /* [] */ if (_imopVarPre386) { /* [] */ /* [] */ _imopVarPre386 = itmax == 50; } /* [] */ _imopVarPre385 = _imopVarPre386; } /* [] */ _imopVarPre384 = _imopVarPre385; } /* [] */ /* [] */ if (_imopVarPre384) { /* [] */ /* [] */ *class = 'S'; /* [] */ dtref = 5.0e-1; /* [] */ xcrref[0] = 1.6196343210976702e-02; /* [] */ xcrref[1] = 2.1976745164821318e-03; 
/* [] */ xcrref[2] = 1.5179927653399185e-03; /* [] */ xcrref[3] = 1.5029584435994323e-03; /* [] */ xcrref[4] = 3.4264073155896461e-02; /* [] */ xceref[0] = 6.4223319957960924e-04; /* [] */ xceref[1] = 8.4144342047347926e-05; /* [] */ xceref[2] = 5.8588269616485186e-05; /* [] */ xceref[3] = 5.8474222595157350e-05; /* [] */ xceref[4] = 1.3103347914111294e-03; /* [] */ xciref = 7.8418928865937083; } else { /* [] */ /* [] */ int _imopVarPre390; /* [] */ int _imopVarPre391; /* [] */ int _imopVarPre392; /* [] */ _imopVarPre390 = nx0 == 33; /* [] */ /* [] */ if (_imopVarPre390) { /* [] */ /* [] */ _imopVarPre391 = ny0 == 33; /* [] */ /* [] */ if (_imopVarPre391) { /* [] */ /* [] */ _imopVarPre392 = nz0 == 33; /* [] */ /* [] */ if (_imopVarPre392) { /* [] */ /* [] */ _imopVarPre392 = itmax == 300; } /* [] */ _imopVarPre391 = _imopVarPre392; } /* [] */ _imopVarPre390 = _imopVarPre391; } /* [] */ /* [] */ if (_imopVarPre390) { /* [] */ /* [] */ *class = 'W'; /* [] */ dtref = 1.5e-3; /* [] */ xcrref[0] = 0.1236511638192e+02; /* [] */ xcrref[1] = 0.1317228477799e+01; /* [] */ xcrref[2] = 0.2550120713095e+01; /* [] */ xcrref[3] = 0.2326187750252e+01; /* [] */ xcrref[4] = 0.2826799444189e+02; /* [] */ xceref[0] = 0.4867877144216; /* [] */ xceref[1] = 0.5064652880982e-01; /* [] */ xceref[2] = 0.9281818101960e-01; /* [] */ xceref[3] = 0.8570126542733e-01; /* [] */ xceref[4] = 0.1084277417792e+01; /* [] */ xciref = 0.1161399311023e+02; } else { /* [] */ /* [] */ int _imopVarPre396; /* [] */ int _imopVarPre397; /* [] */ int _imopVarPre398; /* [] */ _imopVarPre396 = nx0 == 64; /* [] */ /* [] */ if (_imopVarPre396) { /* [] */ /* [] */ _imopVarPre397 = ny0 == 64; /* [] */ /* [] */ if (_imopVarPre397) { /* [] */ /* [] */ _imopVarPre398 = nz0 == 64; /* [] */ /* [] */ if (_imopVarPre398) { /* [] */ /* [] */ _imopVarPre398 = itmax == 250; } /* [] */ _imopVarPre397 = _imopVarPre398; } /* [] */ _imopVarPre396 = _imopVarPre397; } /* [] */ /* [] */ if (_imopVarPre396) { /* [] */ /* [] */ 
*class = 'A'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 7.7902107606689367e+02; /* [] */ xcrref[1] = 6.3402765259692870e+01; /* [] */ xcrref[2] = 1.9499249727292479e+02; /* [] */ xcrref[3] = 1.7845301160418537e+02; /* [] */ xcrref[4] = 1.8384760349464247e+03; /* [] */ xceref[0] = 2.9964085685471943e+01; /* [] */ xceref[1] = 2.8194576365003349; /* [] */ xceref[2] = 7.3473412698774742; /* [] */ xceref[3] = 6.7139225687777051; /* [] */ xceref[4] = 7.0715315688392578e+01; /* [] */ xciref = 2.6030925604886277e+01; } else { /* [] */ /* [] */ int _imopVarPre402; /* [] */ int _imopVarPre403; /* [] */ int _imopVarPre404; /* [] */ _imopVarPre402 = nx0 == 102; /* [] */ /* [] */ if (_imopVarPre402) { /* [] */ /* [] */ _imopVarPre403 = ny0 == 102; /* [] */ /* [] */ if (_imopVarPre403) { /* [] */ /* [] */ _imopVarPre404 = nz0 == 102; /* [] */ /* [] */ if (_imopVarPre404) { /* [] */ /* [] */ _imopVarPre404 = itmax == 250; } /* [] */ _imopVarPre403 = _imopVarPre404; } /* [] */ _imopVarPre402 = _imopVarPre403; } /* [] */ /* [] */ if (_imopVarPre402) { /* [] */ /* [] */ *class = 'B'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 3.5532672969982736e+03; /* [] */ xcrref[1] = 2.6214750795310692e+02; /* [] */ xcrref[2] = 8.8333721850952190e+02; /* [] */ xcrref[3] = 7.7812774739425265e+02; /* [] */ xcrref[4] = 7.3087969592545314e+03; /* [] */ xceref[0] = 1.1401176380212709e+02; /* [] */ xceref[1] = 8.1098963655421574; /* [] */ xceref[2] = 2.8480597317698308e+01; /* [] */ xceref[3] = 2.5905394567832939e+01; /* [] */ xceref[4] = 2.6054907504857413e+02; /* [] */ xciref = 4.7887162703308227e+01; } else { /* [] */ /* [] */ int _imopVarPre408; /* [] */ int _imopVarPre409; /* [] */ int _imopVarPre410; /* [] */ _imopVarPre408 = nx0 == 162; /* [] */ /* [] */ if (_imopVarPre408) { /* [] */ /* [] */ _imopVarPre409 = ny0 == 162; /* [] */ /* [] */ if (_imopVarPre409) { /* [] */ /* [] */ _imopVarPre410 = nz0 == 162; /* [] */ /* [] */ if (_imopVarPre410) { /* [] */ /* [] */ _imopVarPre410 = itmax 
== 250; } /* [] */ _imopVarPre409 = _imopVarPre410; } /* [] */ _imopVarPre408 = _imopVarPre409; } /* [] */ /* [] */ if (_imopVarPre408) { /* [] */ /* [] */ *class = 'C'; /* [] */ dtref = 2.0e+0; /* [] */ xcrref[0] = 1.03766980323537846e+04; /* [] */ xcrref[1] = 8.92212458801008552e+02; /* [] */ xcrref[2] = 2.56238814582660871e+03; /* [] */ xcrref[3] = 2.19194343857831427e+03; /* [] */ xcrref[4] = 1.78078057261061185e+04; /* [] */ xceref[0] = 2.15986399716949279e+02; /* [] */ xceref[1] = 1.55789559239863600e+01; /* [] */ xceref[2] = 5.41318863077207766e+01; /* [] */ xceref[3] = 4.82262643154045421e+01; /* [] */ xceref[4] = 4.55902910043250358e+02; /* [] */ xciref = 6.66404553572181300e+01; } else { /* [] */ /* [] */ *verified = 0; } } } } } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ double _imopVarPre412; /* [] */ double _imopVarPre413; /* [] */ _imopVarPre412 = (xcr[m] - xcrref[m]) / xcrref[m]; /* [] */ _imopVarPre413 = fabs(_imopVarPre412); /* [] */ /* [] */ xcrdif[m] = _imopVarPre413; /* [] */ double _imopVarPre415; /* [] */ double _imopVarPre416; /* [] */ _imopVarPre415 = (xce[m] - xceref[m]) / xceref[m]; /* [] */ _imopVarPre416 = fabs(_imopVarPre415); /* [] */ /* [] */ xcedif[m] = _imopVarPre416; } /* [] */ double _imopVarPre418; /* [] */ double _imopVarPre419; /* [] */ _imopVarPre418 = (xci - xciref) / xciref; /* [] */ _imopVarPre419 = fabs(_imopVarPre418); /* [] */ /* [] */ xcidif = _imopVarPre419; /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ char _imopVarPre421; /* [] */ _imopVarPre421 = *class; /* [] */ printf("\n Verification being performed for class %1c\n", _imopVarPre421); /* [] */ /* [] */ printf(" Accuracy setting for epsilon = %20.13e\n", epsilon); /* [] */ /* [] */ double _imopVarPre424; /* [] */ double _imopVarPre425; /* [] */ _imopVarPre424 = dt - dtref; /* [] */ _imopVarPre425 = fabs(_imopVarPre424); /* [] */ /* [] */ /* [] */ if (_imopVarPre425 > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] 
*/ *class = 'U'; /* [] */ printf(" DT does not match the reference value of %15.8e\n", dtref); /* [] */ } } else { /* [] */ /* [] */ printf(" Unknown class\n"); /* [] */ } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of RMS-norms of residual\n"); /* [] */ } else { /* [] */ /* [] */ printf(" RMS-norms of residual\n"); /* [] */ } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ double _imopVarPre427; /* [] */ _imopVarPre427 = xcr[m]; /* [] */ printf(" %2d %20.13e\n", m, _imopVarPre427); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcrdif[m] > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ double _imopVarPre431; /* [] */ double _imopVarPre432; /* [] */ double _imopVarPre433; /* [] */ _imopVarPre431 = xcrdif[m]; /* [] */ _imopVarPre432 = xcrref[m]; /* [] */ _imopVarPre433 = xcr[m]; /* [] */ printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre433, _imopVarPre432, _imopVarPre431); /* [] */ } else { /* [] */ /* [] */ double _imopVarPre437; /* [] */ double _imopVarPre438; /* [] */ double _imopVarPre439; /* [] */ _imopVarPre437 = xcrdif[m]; /* [] */ _imopVarPre438 = xcrref[m]; /* [] */ _imopVarPre439 = xcr[m]; /* [] */ printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre439, _imopVarPre438, _imopVarPre437); /* [] */ } } } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of RMS-norms of solution error\n"); /* [] */ } else { /* [] */ /* [] */ printf(" RMS-norms of solution error\n"); /* [] */ } /* [] */ /* [] */ /* [] */ /* [] */ for (m = 0; m < 5; m++) { /* [] */ /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ double _imopVarPre441; /* [] */ _imopVarPre441 = xce[m]; /* [] */ printf(" %2d %20.13e\n", m, _imopVarPre441); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcedif[m] > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ double _imopVarPre445; /* [] */ double _imopVarPre446; /* [] */ double 
_imopVarPre447; /* [] */ _imopVarPre445 = xcedif[m]; /* [] */ _imopVarPre446 = xceref[m]; /* [] */ _imopVarPre447 = xce[m]; /* [] */ printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre447, _imopVarPre446, _imopVarPre445); /* [] */ } else { /* [] */ /* [] */ double _imopVarPre451; /* [] */ double _imopVarPre452; /* [] */ double _imopVarPre453; /* [] */ _imopVarPre451 = xcedif[m]; /* [] */ _imopVarPre452 = xceref[m]; /* [] */ _imopVarPre453 = xce[m]; /* [] */ printf(" %2d %20.13e%20.13e%20.13e\n", m, _imopVarPre453, _imopVarPre452, _imopVarPre451); /* [] */ } } } /* [] */ /* [] */ if (*class != 'U') { /* [] */ /* [] */ printf(" Comparison of surface integral\n"); /* [] */ } else { /* [] */ /* [] */ printf(" Surface integral\n"); /* [] */ } /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ printf(" %20.13e\n", xci); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (xcidif > epsilon) { /* [] */ /* [] */ *verified = 0; /* [] */ printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); /* [] */ } else { /* [] */ /* [] */ printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); /* [] */ } } /* [] */ /* [] */ if (*class == 'U') { /* [] */ /* [] */ printf(" No reference values provided\n"); /* [] */ /* [] */ printf(" No verification performed\n"); /* [] */ } else { /* [] */ /* [] */ /* [] */ if (*verified) { /* [] */ /* [] */ printf(" Verification Successful\n"); /* [] */ } else { /* [] */ /* [] */ printf(" Verification failed\n"); /* [] */ } } }
XSHA_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2008,2011 by Solar Designer * * Intrinsics support added by magnum 2011. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_XSHA; #elif FMT_REGISTERS_H john_register_one(&fmt_XSHA); #else #include <string.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #ifdef _OPENMP static unsigned int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #endif #endif #include "simd-intrinsics.h" #include "params.h" #include "common.h" #include "formats.h" #include "sha.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "xsha" #define FORMAT_NAME "Mac OS X 10.4 - 10.6" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 51 #define CIPHERTEXT_LENGTH 48 #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 4 #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #ifdef _OPENMP #define MAX_KEYS_PER_CRYPT (0x200 * 3) #else #define MAX_KEYS_PER_CRYPT 0x100 #endif #endif static struct fmt_tests tests[] = { {"12345678F9083C7F66F46A0A102E4CC17EC08C8AF120571B", "abc"}, {"12345678EB8844BFAF2A8CBDD587A37EF8D4A290680D5818", "azertyuiop1"}, {"3234C32AAA335FD20E3F95870E5851BDBE942B79CE4FDD92", "azertyuiop2"}, {"01295B67659E95F32931CEDB3BA50289E2826AF3D5A1422F", "apple"}, {"0E6A48F765D0FFFFF6247FA80D748E615F91DD0C7431E4D9", "macintosh"}, {"A320163F1E6DB42C3949F7E232888ACC7DB7A0A17E493DBA", "test"}, {"743777471285CB3566886D4821D556E475E0DF9234308B22", "123"}, {"474379622BD7B9F84BD6E4BB52ABF9D01705EFB0A2426655", "passWOrd"}, {"597A523666A10C534495DB6333CF7EBA70C1A578CADE11A3", ""}, 
{NULL} }; #ifdef SIMD_COEF_32 static ARCH_WORD_32 (*saved_key); static ARCH_WORD_32 (*crypt_key); static ARCH_WORD_32 cur_salt; #else static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static int saved_len[MAX_KEYS_PER_CRYPT]; static SHA_CTX ctx_salt; static ARCH_WORD_32 crypt_out[MAX_KEYS_PER_CRYPT][5]; #endif static void init(struct fmt_main *self) { #ifdef SIMD_COEF_32 #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * NBKEYS; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * NBKEYS; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, SHA_BUF_SIZ * 4, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); #endif } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; /* Require uppercase hex digits (assume ASCII) */ pos = ciphertext; while (atoi16[ARCH_INDEX(*pos)] != 0x7F && *pos < 'a') pos++; return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH; } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext + 8; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, BINARY_SIZE); #endif return out; } static void *get_salt(char *ciphertext) { static unsigned int outbuf[SALT_SIZE / sizeof(int)]; unsigned char *out = (unsigned char*)outbuf; char *p; int i; p = ciphertext; for (i = 0; i < SALT_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, SALT_SIZE); #endif return out; } #ifdef SIMD_COEF_32 static int get_hash_0(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return 
((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_0; } static int get_hash_1(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_1; } static int get_hash_2(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_2; } static int get_hash_3(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_3; } static int get_hash_4(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_4; } static int get_hash_5(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_5; } static int get_hash_6(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*5] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = *(ARCH_WORD_32*)salt; #else SHA1_Init(&ctx_salt); SHA1_Update(&ctx_salt, salt, SALT_SIZE); #endif } 
static void set_key(char *key, int index) { #ifdef SIMD_COEF_32 #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t)); const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ? key : strcpy(buf_aligned, key)); #endif ARCH_WORD_32 *keybuffer = &saved_key[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32 + SIMD_COEF_32]; ARCH_WORD_32 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_32 temp; len = 4; while((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16)); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP(temp | (0x80U << 24)); len+=3; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); len += 4; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80000000; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuffer[14*SIMD_COEF_32] = len << 3; #else int length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; saved_len[index] = length; memcpy(saved_key[index], key, length); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_32 unsigned int i,s; static char out[PLAINTEXT_LENGTH + 1]; s = ((unsigned int *)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3; for(i = 0; i < (s - SALT_SIZE); i++) out[i] = ((char*)saved_key)[ GETPOS((i + SALT_SIZE), index) ]; out[i] = 0; return (char *) out; #else saved_key[index][saved_len[index]] = 0; return saved_key[index]; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_COEF_32 int i = 0; #if defined(_OPENMP) #pragma omp parallel for for (i=0; i < omp_t; i++) { #endif unsigned int 
*in = &saved_key[i*NBKEYS*SHA_BUF_SIZ]; unsigned int *out = &crypt_key[i*NBKEYS*BINARY_SIZE/4]; unsigned int j; for (j=0; j < NBKEYS; j++) in[(j&(SIMD_COEF_32-1)) + j/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = cur_salt; SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN); #if defined(_OPENMP) } #endif #else int i; #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(ctx_salt, saved_key, saved_len, crypt_out) #endif for (i = 0; i < count; i++) { SHA_CTX ctx; memcpy(&ctx, &ctx_salt, sizeof(ctx)); SHA1_Update(&ctx, saved_key[i], saved_len[i]); SHA1_Final((unsigned char *)(crypt_out[i]), &ctx); } #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; #ifdef _OPENMP for(;y<SIMD_PARA_SHA1*omp_t;y++) #else for(;y<SIMD_PARA_SHA1;y++) #endif for(x=0;x<SIMD_COEF_32;x++) { if( ((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5] ) return 1; } return 0; #else ARCH_WORD_32 b0 = *(ARCH_WORD_32 *)binary; int i; for (i = 0; i < count; i++) { if (b0 != crypt_out[i][0]) continue; if (!memcmp(binary, crypt_out[i], BINARY_SIZE)) return 1; } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; if( ((ARCH_WORD_32 *)binary)[0] != ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5] ) return 0; if( ((ARCH_WORD_32 *)binary)[1] != ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5+SIMD_COEF_32] ) return 0; if( ((ARCH_WORD_32 *)binary)[2] != ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5+2*SIMD_COEF_32] ) return 0; if( ((ARCH_WORD_32 *)binary)[3] != ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5+3*SIMD_COEF_32] ) return 0; if( ((ARCH_WORD_32 *)binary)[4] != ((ARCH_WORD_32 *)crypt_key)[x+y*SIMD_COEF_32*5+4*SIMD_COEF_32] ) return 0; return 1; #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_XSHA = { { 
FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_OMP_BAD | FMT_CASE | FMT_8_BIT, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_XSHA; #elif FMT_REGISTERS_H john_register_one(&fmt_XSHA); #else #include <string.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #include "simd-intrinsics.h" #include "params.h" #include "common.h" #include "formats.h" #include "sha.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "xsha" #define FORMAT_NAME "Mac OS X 10.4 - 10.6" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 51 #define CIPHERTEXT_LENGTH 48 #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 4 #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"12345678F9083C7F66F46A0A102E4CC17EC08C8AF120571B", "abc"}, {"12345678EB8844BFAF2A8CBDD587A37EF8D4A290680D5818", "azertyuiop1"}, {"3234C32AAA335FD20E3F95870E5851BDBE942B79CE4FDD92", "azertyuiop2"}, {"01295B67659E95F32931CEDB3BA50289E2826AF3D5A1422F", "apple"}, {"0E6A48F765D0FFFFF6247FA80D748E615F91DD0C7431E4D9", "macintosh"}, {"A320163F1E6DB42C3949F7E232888ACC7DB7A0A17E493DBA", "test"}, {"743777471285CB3566886D4821D556E475E0DF9234308B22", "123"}, {"474379622BD7B9F84BD6E4BB52ABF9D01705EFB0A2426655", "passWOrd"}, {"597A523666A10C534495DB6333CF7EBA70C1A578CADE11A3", ""}, {NULL} }; #ifdef SIMD_COEF_32 static ARCH_WORD_32(*saved_key); static ARCH_WORD_32(*crypt_key); static ARCH_WORD_32 cur_salt; #else static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static int saved_len[MAX_KEYS_PER_CRYPT]; static SHA_CTX ctx_salt; static ARCH_WORD_32 crypt_out[MAX_KEYS_PER_CRYPT][5]; #endif static void init(struct fmt_main *self) { 
#ifdef SIMD_COEF_32 saved_key = mem_calloc_align(self->params.max_keys_per_crypt, SHA_BUF_SIZ * 4, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); #endif } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; /* Require uppercase hex digits (assume ASCII) */ pos = ciphertext; while (atoi16[ARCH_INDEX(*pos)] != 0x7F && *pos < 'a') pos++; return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH; } static void * get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext + 8; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, BINARY_SIZE); #endif return out; } static void * get_salt(char *ciphertext) { static unsigned int outbuf[SALT_SIZE / sizeof(int)]; unsigned char *out = (unsigned char *)outbuf; char *p; int i; p = ciphertext; for (i = 0; i < SALT_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, SALT_SIZE); #endif return out; } #ifdef SIMD_COEF_32 static int get_hash_0(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_0; } static int get_hash_1(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_1; } static int get_hash_2(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_2; } static int get_hash_3(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 
1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_3; } static int get_hash_4(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_4; } static int get_hash_5(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_5; } static int get_hash_6(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32 *) salt & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = *(ARCH_WORD_32 *) salt; #else SHA1_Init(&ctx_salt); SHA1_Update(&ctx_salt, salt, SALT_SIZE); #endif } static void set_key(char *key, int index) { #ifdef SIMD_COEF_32 #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_32 *wkey = (ARCH_WORD_32 *) key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t)); const ARCH_WORD_32 *wkey = (uint32_t *) (is_aligned(key, sizeof(uint32_t)) ? 
key : strcpy(buf_aligned, key)); #endif ARCH_WORD_32 *keybuffer = &saved_key[(index & (SIMD_COEF_32 - 1)) + (unsigned int)index / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32 + SIMD_COEF_32]; ARCH_WORD_32 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_32 temp; len = 4; while ((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16)); len += 2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP(temp | (0x80U << 24)); len += 3; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); len += 4; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80000000; key_cleaning: keybuf_word += SIMD_COEF_32; while (*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuffer[14 * SIMD_COEF_32] = len << 3; #else int length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; saved_len[index] = length; memcpy(saved_key[index], key, length); #endif } static char * get_key(int index) { #ifdef SIMD_COEF_32 unsigned int i, s; static char out[PLAINTEXT_LENGTH + 1]; s = ((unsigned int *)saved_key)[15 * SIMD_COEF_32 + (index & (SIMD_COEF_32 - 1)) + (unsigned int)index / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] >> 3; for (i = 0; i < (s - SALT_SIZE); i++) out[i] = ((char *)saved_key)[GETPOS((i + SALT_SIZE), index)]; out[i] = 0; return (char *)out; #else saved_key[index][saved_len[index]] = 0; return saved_key[index]; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_COEF_32 int i = 0; unsigned int *in = &saved_key[i * NBKEYS * SHA_BUF_SIZ]; unsigned int *out = &crypt_key[i * NBKEYS * BINARY_SIZE / 4]; unsigned int j; for (j = 0; j < NBKEYS; j++) in[(j & (SIMD_COEF_32 - 1)) + j / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = cur_salt; SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN); #else int i; for (i = 0; i < count; i++) { SHA_CTX 
ctx; memcpy(&ctx, &ctx_salt, sizeof(ctx)); SHA1_Update(&ctx, saved_key[i], saved_len[i]); SHA1_Final((unsigned char *)(crypt_out[i]), &ctx); } #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y = 0; for (x = 0; x < SIMD_COEF_32; x++) { if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5]) return 1; } return 0; #else ARCH_WORD_32 b0 = *(ARCH_WORD_32 *) binary; int i; for (i = 0; i < count; i++) { if (b0 != crypt_out[i][0]) continue; if (!memcmp(binary, crypt_out[i], BINARY_SIZE)) return 1; } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; if (((ARCH_WORD_32 *) binary)[0] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5]) return 0; if (((ARCH_WORD_32 *) binary)[1] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[2] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 2 * SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[3] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 3 * SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[4] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 4 * SIMD_COEF_32]) return 0; return 1; #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_XSHA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_OMP_BAD | FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, 
fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_XSHA; #elif FMT_REGISTERS_H john_register_one(&fmt_XSHA); #else #include <string.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #ifdef _OPENMP static unsigned int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #endif #endif #include "simd-intrinsics.h" #include "params.h" #include "common.h" #include "formats.h" #include "sha.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "xsha" #define FORMAT_NAME "Mac OS X 10.4 - 10.6" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 51 #define CIPHERTEXT_LENGTH 48 #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 4 #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #ifdef _OPENMP #define MAX_KEYS_PER_CRYPT (0x200 * 3) #else #define MAX_KEYS_PER_CRYPT 0x100 #endif #endif static struct fmt_tests tests[] = { {"12345678F9083C7F66F46A0A102E4CC17EC08C8AF120571B", "abc"}, {"12345678EB8844BFAF2A8CBDD587A37EF8D4A290680D5818", "azertyuiop1"}, {"3234C32AAA335FD20E3F95870E5851BDBE942B79CE4FDD92", "azertyuiop2"}, {"01295B67659E95F32931CEDB3BA50289E2826AF3D5A1422F", "apple"}, {"0E6A48F765D0FFFFF6247FA80D748E615F91DD0C7431E4D9", "macintosh"}, {"A320163F1E6DB42C3949F7E232888ACC7DB7A0A17E493DBA", "test"}, {"743777471285CB3566886D4821D556E475E0DF9234308B22", "123"}, {"474379622BD7B9F84BD6E4BB52ABF9D01705EFB0A2426655", "passWOrd"}, {"597A523666A10C534495DB6333CF7EBA70C1A578CADE11A3", ""}, {NULL} }; #ifdef SIMD_COEF_32 static ARCH_WORD_32(*saved_key); static ARCH_WORD_32(*crypt_key); static ARCH_WORD_32 cur_salt; #else static char 
saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static int saved_len[MAX_KEYS_PER_CRYPT]; static SHA_CTX ctx_salt; static ARCH_WORD_32 crypt_out[MAX_KEYS_PER_CRYPT][5]; #endif static void init(struct fmt_main *self) { #ifdef SIMD_COEF_32 #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * NBKEYS; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * NBKEYS; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, SHA_BUF_SIZ * 4, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); #endif } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; /* Require uppercase hex digits (assume ASCII) */ pos = ciphertext; while (atoi16[ARCH_INDEX(*pos)] != 0x7F && *pos < 'a') pos++; return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH; } static void * get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext + 8; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, BINARY_SIZE); #endif return out; } static void * get_salt(char *ciphertext) { static unsigned int outbuf[SALT_SIZE / sizeof(int)]; unsigned char *out = (unsigned char *)outbuf; char *p; int i; p = ciphertext; for (i = 0; i < SALT_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef SIMD_COEF_32 alter_endianity(out, SALT_SIZE); #endif return out; } #ifdef SIMD_COEF_32 static int get_hash_0(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_0; } static int get_hash_1(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); 
y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_1; } static int get_hash_2(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_2; } static int get_hash_3(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_3; } static int get_hash_4(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_4; } static int get_hash_5(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_5; } static int get_hash_6(int index) { unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32 *) salt & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = *(ARCH_WORD_32 *) salt; #else SHA1_Init(&ctx_salt); SHA1_Update(&ctx_salt, salt, SALT_SIZE); #endif } static void set_key(char *key, int index) { #ifdef 
SIMD_COEF_32 #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_32 *wkey = (ARCH_WORD_32 *) key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t)); const ARCH_WORD_32 *wkey = (uint32_t *) (is_aligned(key, sizeof(uint32_t)) ? key : strcpy(buf_aligned, key)); #endif ARCH_WORD_32 *keybuffer = &saved_key[(index & (SIMD_COEF_32 - 1)) + (unsigned int)index / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32 + SIMD_COEF_32]; ARCH_WORD_32 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_32 temp; len = 4; while ((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16)); len += 2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP(temp | (0x80U << 24)); len += 3; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); len += 4; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80000000; key_cleaning: keybuf_word += SIMD_COEF_32; while (*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuffer[14 * SIMD_COEF_32] = len << 3; #else int length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; saved_len[index] = length; memcpy(saved_key[index], key, length); #endif } static char * get_key(int index) { #ifdef SIMD_COEF_32 unsigned int i, s; static char out[PLAINTEXT_LENGTH + 1]; s = ((unsigned int *)saved_key)[15 * SIMD_COEF_32 + (index & (SIMD_COEF_32 - 1)) + (unsigned int)index / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] >> 3; for (i = 0; i < (s - SALT_SIZE); i++) out[i] = ((char *)saved_key)[GETPOS((i + SALT_SIZE), index)]; out[i] = 0; return (char *)out; #else saved_key[index][saved_len[index]] = 0; return saved_key[index]; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_COEF_32 int i = 0; #if defined(_OPENMP) #pragma omp parallel for for (i = 0; i < omp_t; i++) { #endif unsigned int *in = 
&saved_key[i * NBKEYS * SHA_BUF_SIZ]; unsigned int *out = &crypt_key[i * NBKEYS * BINARY_SIZE / 4]; unsigned int j; for (j = 0; j < NBKEYS; j++) in[(j & (SIMD_COEF_32 - 1)) + j / SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = cur_salt; SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN); #if defined(_OPENMP) } #endif #else int i; #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(ctx_salt, saved_key, saved_len, crypt_out) #endif for (i = 0; i < count; i++) { SHA_CTX ctx; memcpy(&ctx, &ctx_salt, sizeof(ctx)); SHA1_Update(&ctx, saved_key[i], saved_len[i]); SHA1_Final((unsigned char *)(crypt_out[i]), &ctx); } #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y = 0; #ifdef _OPENMP for (; y < SIMD_PARA_SHA1 * omp_t; y++) #else for (; y < SIMD_PARA_SHA1; y++) #endif for (x = 0; x < SIMD_COEF_32; x++) { if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5]) return 1; } return 0; #else ARCH_WORD_32 b0 = *(ARCH_WORD_32 *) binary; int i; for (i = 0; i < count; i++) { if (b0 != crypt_out[i][0]) continue; if (!memcmp(binary, crypt_out[i], BINARY_SIZE)) return 1; } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; if (((ARCH_WORD_32 *) binary)[0] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5]) return 0; if (((ARCH_WORD_32 *) binary)[1] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[2] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 2 * SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[3] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 3 * SIMD_COEF_32]) return 0; if (((ARCH_WORD_32 *) binary)[4] != ((ARCH_WORD_32 *) crypt_key)[x + y * SIMD_COEF_32 * 5 + 4 * SIMD_COEF_32]) return 0; return 1; #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); 
#endif } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_XSHA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_OMP_BAD | FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
fill_r_3c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, k0; int shls[3]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; k0 = ao_loc[ksh ] - ao_loc[ksh0]; (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } static void zcopy_s2_igtj(double complex *out, double complex 
*in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } /* * out[comp,naok,nij] in C-order * nij = i1*(i1+1)/2 - i0*(i0+1)/2 * [ \ ] * [**** ] * [***** ] * [*****. ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound * [ \] */ void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int i0 = ao_loc[ish0]; const int i1 = ao_loc[ish1]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off = i0 * (i0 + 1) / 2; const size_t nij = i1 * (i1 + 1) / 2 - off; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; out += ip * (ip + 1) / 2 - off + jp; int ksh, dk, k0; int shls[3]; dk = GTOmax_shell_dim(ao_loc, shls_slice, 3); double *cache = (double *)(buf + di * 
dj * dk * comp); shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; dk = ao_loc[ksh+1] - ao_loc[ksh]; k0 = ao_loc[ksh ] - ao_loc[ksh0]; (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache); if (ip != jp) { zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk); } else { zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk); } } } void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n"); exit(1); } void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3); const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); #pragma omp parallel { int ish, jsh, ij; double complex *buf = malloc(sizeof(double complex) * (di*di*di*comp + cache_size/2)); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
#include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor) (), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr3c_fill_s1(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, k0; int shls[3]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; k0 = ao_loc[ksh] - ao_loc[ksh0]; (*intor) (out + k0 * nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } static void zcopy_s2_igtj(double complex * out, double complex * in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pout[j] = pin[j * di + i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } static void zcopy_s2_ieqj(double complex * out, double complex * in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double 
complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pout[j] = pin[j * di + i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } /* * out[comp,naok,nij] in C-order nij = i1*(i1+1)/2 - i0*(i0+1)/2 [ \ ] [**** * ] [***** ] [*****. ] <= . may not be filled, if jsh-upper-bound < * ish-upper-bound [ \] */ void GTOr3c_fill_s2ij(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int i0 = ao_loc[ish0]; const int i1 = ao_loc[ish1]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off = i0 * (i0 + 1) / 2; const size_t nij = i1 * (i1 + 1) / 2 - off; const size_t nijk = nij * naok; const int di = ao_loc[ish + 1] - ao_loc[ish]; const int dj = ao_loc[jsh + 1] - ao_loc[jsh]; out += ip * (ip + 1) / 2 - off + jp; int ksh, dk, k0; int shls[3]; dk = GTOmax_shell_dim(ao_loc, shls_slice, 3); double *cache = (double *)(buf + di * dj * dk * comp); shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; dk = ao_loc[ksh + 1] - ao_loc[ksh]; k0 = ao_loc[ksh] - ao_loc[ksh0]; (*intor) (buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache); if (ip != jp) { zcopy_s2_igtj(out + k0 * nij, buf, comp, ip, nij, nijk, di, dj, dk); } else { zcopy_s2_ieqj(out + k0 * nij, buf, comp, ip, nij, nijk, di, dj, dk); } } } void GTOr3c_fill_s2jk(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int 
nbas, double *env) { fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n"); exit(1); } void GTOr3c_drv(int (*intor) (), void (*fill) (), double complex * eri, int comp, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3); const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); int ish, jsh, ij; double complex *buf = malloc(sizeof(double complex) * (di * di * di * comp + cache_size / 2)); for (ij = 0; ij < nish * njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill) (intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); }
#include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor) (), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr3c_fill_s1(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, k0; int shls[3]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; k0 = ao_loc[ksh] - ao_loc[ksh0]; (*intor) (out + k0 * nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } static void zcopy_s2_igtj(double complex * out, double complex * in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pout[j] = pin[j * di + i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } static void zcopy_s2_ieqj(double complex * out, double complex * in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double 
complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pout[j] = pin[j * di + i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } /* * out[comp,naok,nij] in C-order nij = i1*(i1+1)/2 - i0*(i0+1)/2 [ \ ] [**** * ] [***** ] [*****. ] <= . may not be filled, if jsh-upper-bound < * ish-upper-bound [ \] */ void GTOr3c_fill_s2ij(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int i0 = ao_loc[ish0]; const int i1 = ao_loc[ish1]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off = i0 * (i0 + 1) / 2; const size_t nij = i1 * (i1 + 1) / 2 - off; const size_t nijk = nij * naok; const int di = ao_loc[ish + 1] - ao_loc[ish]; const int dj = ao_loc[jsh + 1] - ao_loc[jsh]; out += ip * (ip + 1) / 2 - off + jp; int ksh, dk, k0; int shls[3]; dk = GTOmax_shell_dim(ao_loc, shls_slice, 3); double *cache = (double *)(buf + di * dj * dk * comp); shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; dk = ao_loc[ksh + 1] - ao_loc[ksh]; k0 = ao_loc[ksh] - ao_loc[ksh0]; (*intor) (buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache); if (ip != jp) { zcopy_s2_igtj(out + k0 * nij, buf, comp, ip, nij, nijk, di, dj, dk); } else { zcopy_s2_ieqj(out + k0 * nij, buf, comp, ip, nij, nijk, di, dj, dk); } } } void GTOr3c_fill_s2jk(int (*intor) (), double complex * out, double complex * buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int 
nbas, double *env) { fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n"); exit(1); } void GTOr3c_drv(int (*intor) (), void (*fill) (), double complex * eri, int comp, int *shls_slice, int *ao_loc, CINTOpt * cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3); const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); #pragma omp parallel { int ish, jsh, ij; double complex *buf = malloc(sizeof(double complex) * (di * di * di * comp + cache_size / 2)); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish * njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill) (intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <stdio.h> #include <math.h> #include <float.h> #include <numpy/arrayobject.h> #include <dynmat.h> #include <derivative_dynmat.h> #include <kgrid.h> #include <tetrahedron_method.h> #define KB 8.6173382568083159E-05 /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args); static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args); static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args); static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args); static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject *args); static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args); static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args); static PyObject * py_distribute_fc2(PyObject *self, PyObject *args); static PyObject * py_compute_permutation(PyObject *self, PyObject *args); static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args); static PyObject * py_thm_neighboring_grid_points(PyObject *self, PyObject *args); static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args); static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args); static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args); static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args); static void distribute_fc2(double 
(*fc2)[3][3], const int * atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], const int * permutations, const int * map_atoms, const int * map_syms, const int num_rot, const int num_pos); static int compute_permutation(int * rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec); static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int * multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec); static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int lattice_points[27][3], PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec); static double get_free_energy(const double temperature, const double f); static double get_entropy(const double temperature, const double f); static double get_heat_capacity(const double temperature, const double f); static void set_index_permutation_symmetry_fc(double * fc, const int natom); static void set_translational_symmetry_fc(double * fc, const int natom); static void set_index_permutation_symmetry_compact_fc(double * fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose); static void set_translational_symmetry_compact_fc(double * fc, const int p2s[], const int n_satom, const int n_patom); /* static double get_energy(double temperature, double f); */ static int nint(const double a); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static PyObject * error_out(PyObject *m) { struct module_state *st = 
GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS, "Dipole-dipole interaction"}, {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS, "q=0 terms of Dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS, "Implementation detail of get_smallest_vectors."}, {"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS, "Set candidate vectors."}, {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS, "Neighboring grid points by relative grid addresses"}, {"tetrahedra_relative_grid_address", py_thm_relative_grid_address, METH_VARARGS, "Relative grid addresses of 
vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL }; #define INITERROR return NULL PyObject * PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state *st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_dynamical_matrices; PyArrayObject* py_commensurate_points; PyArrayObject* py_shortest_vectors; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2pp_map; PyArrayObject* py_fc_index_map; double* fc; double* dm; double 
(*comm_points)[3]; double (*shortest_vectors)[27][3]; double* masses; int* multiplicities; int* s2pp_map; int* fc_index_map; int num_patom; int num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_force_constants, &py_dynamical_matrices, &py_commensurate_points, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2pp_map, &py_fc_index_map)) { return NULL; } fc = (double*)PyArray_DATA(py_force_constants); dm = (double*)PyArray_DATA(py_dynamical_matrices); comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points); shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); masses = (double*)PyArray_DATA(py_masses); multiplicities = (int*)PyArray_DATA(py_multiplicities); s2pp_map = (int*)PyArray_DATA(py_s2pp_map); fc_index_map = (int*)PyArray_DATA(py_fc_index_map); num_patom = PyArray_DIMS(py_multiplicities)[1]; num_satom = PyArray_DIMS(py_multiplicities)[0]; dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors, multiplicities, masses, s2pp_map, fc_index_map, num_patom, num_satom); Py_RETURN_NONE; } static PyObject * py_compute_permutation(PyObject *self, PyObject *args) { PyArrayObject* permutation; PyArrayObject* lattice; PyArrayObject* positions; PyArrayObject* permuted_positions; double symprec; int* rot_atoms; double (*lat)[3]; double (*pos)[3]; double (*rot_pos)[3]; int num_pos; int is_found; if (!PyArg_ParseTuple(args, "OOOOd", &permutation, &lattice, &positions, &permuted_positions, &symprec)) { return NULL; } rot_atoms = (int*)PyArray_DATA(permutation); lat = (double(*)[3])PyArray_DATA(lattice); pos = (double(*)[3])PyArray_DATA(positions); rot_pos = (double(*)[3])PyArray_DATA(permuted_positions); num_pos = PyArray_DIMS(positions)[0]; is_found = compute_permutation(rot_atoms, lat, pos, rot_pos, num_pos, symprec); return Py_BuildValue("i", is_found); } static PyObject * py_gsv_copy_smallest_vectors(PyObject *self, PyObject *args) { PyArrayObject* py_shortest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* 
py_vectors; PyArrayObject* py_lengths; double symprec; double (*shortest_vectors)[27][3]; double (*vectors)[27][3]; double (*lengths)[27]; int * multiplicity; int size_super, size_prim; if (!PyArg_ParseTuple(args, "OOOOd", &py_shortest_vectors, &py_multiplicity, &py_vectors, &py_lengths, &symprec)) { return NULL; } shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); vectors = (double(*)[27][3])PyArray_DATA(py_vectors); lengths = (double(*)[27])PyArray_DATA(py_lengths); size_super = PyArray_DIMS(py_vectors)[0]; size_prim = PyArray_DIMS(py_vectors)[1]; gsv_copy_smallest_vectors(shortest_vectors, multiplicity, vectors, lengths, size_super * size_prim, symprec); Py_RETURN_NONE; } static PyObject * py_gsv_set_smallest_vectors(PyObject *self, PyObject *args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; double symprec; double (*smallest_vectors)[27][3]; int * multiplicity; double (*pos_to)[3]; double (*pos_from)[3]; int (*lattice_points)[3]; double (*reduced_basis)[3]; int (*trans_mat)[3]; int num_pos_to, num_pos_from; if (!PyArg_ParseTuple(args, "OOOOOOOd", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &symprec)) { return NULL; } smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points); reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat); gsv_set_smallest_vectors(smallest_vectors, 
multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, reduced_basis, trans_mat, symprec); Py_RETURN_NONE; } static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args) { PyArrayObject* force_constants; double *fc; int level; int n_satom, i, j, k, l, iter; double sum; if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) { return NULL; } fc = (double*)PyArray_DATA(force_constants); n_satom = PyArray_DIMS(force_constants)[0]; for (iter=0; iter < level; iter++) { /* Subtract drift along column */ for (j = 0; j < n_satom; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sum = 0; for (i = 0; i < n_satom; i++) { sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l]; } sum /= n_satom; for (i = 0; i < n_satom; i++) { fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum; } } } } /* Subtract drift along row */ for (i = 0; i < n_satom; i++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sum = 0; for (j = 0; j < n_satom; j++) { sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l]; } sum /= n_satom; for (j = 0; j < n_satom; j++) { fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum; } } } } set_index_permutation_symmetry_fc(fc, n_satom); } set_translational_symmetry_fc(fc, n_satom); Py_RETURN_NONE; } static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; int level; double *fc; int *perms; int *s2pp; int *p2s; int *nsym_list; int n_patom, n_satom, i, j, k, l, n, iter; double sum; if (!PyArg_ParseTuple(args, "OOOOOi", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = 
PyArray_DIMS(py_fc)[1]; for (iter=0; iter < level; iter++) { for (n = 0; n < 2; n++) { /* transpose only */ set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); for (i = 0; i < n_patom; i++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sum = 0; for (j = 0; j < n_satom; j++) { sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l]; } sum /= n_satom; for (j = 0; j < n_satom; j++) { fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum; } } } } } set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 0); } set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom); Py_RETURN_NONE; } static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double *fc; int *s2pp; int *p2s; int *nsym_list; int *perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double (*svecs)[27][3]; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dynamical_matrix, 
&py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_shortest_vectors; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double (*svecs)[27][3]; double* m; double (*born)[3][3]; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; int n; double (*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_shortest_vectors, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = (int*)PyArray_DATA(py_multiplicities); s2p_map = (int*)PyArray_DATA(py_s2p_map); p2s_map = (int*)PyArray_DATA(py_p2s_map); num_patom = 
PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole(PyObject *self, PyObject *args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double (*G_list)[3]; double* q_vector; double* q_direction; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole_q0(PyObject *self, PyObject 
*args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double (*G_list)[3]; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args) { PyArrayObject* derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* r_vector; PyArrayObject* lattice; PyArrayObject* q_vector; PyArrayObject* py_multiplicities; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* dielectric; PyArrayObject* q_direction; double nac_factor; double* ddm; double* fc; double* q; double* lat; double* r; double* m; int* multi; int* s2p_map; int* p2s_map; int num_patom; int num_satom; double *z; double *epsilon; double *q_dir; if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &derivative_dynmat, &py_force_constants, &q_vector, &lattice, /* column vectors */ &r_vector, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &dielectric, &q_direction)) { return NULL; } ddm = (double*)PyArray_DATA(derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(q_vector); lat = 
/* NOTE(review): the function enclosing the next statements begins before this
   chunk (py_get_derivative_dynmat); its tail is kept verbatim. */
              (double*)PyArray_DATA(lattice);
  r = (double*)PyArray_DATA(r_vector);
  m = (double*)PyArray_DATA(py_masses);
  multi = (int*)PyArray_DATA(py_multiplicities);
  s2p_map = (int*)PyArray_DATA(py_s2p_map);
  p2s_map = (int*)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];
  /* Optional non-analytical-correction inputs: Py_None means "not given". */
  if ((PyObject*)py_born == Py_None) {
    z = NULL;
  } else {
    z = (double*)PyArray_DATA(py_born);
  }
  if ((PyObject*)dielectric == Py_None) {
    epsilon = NULL;
  } else {
    epsilon = (double*)PyArray_DATA(dielectric);
  }
  if ((PyObject*)q_direction == Py_None) {
    q_dir = NULL;
  } else {
    q_dir = (double*)PyArray_DATA(q_direction);
  }
  get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q, lat,
                             r, multi, m, s2p_map, p2s_map,
                             nac_factor, z, epsilon, q_dir);

  Py_RETURN_NONE;
}

/* Thermal properties */
/* Accumulate weighted free energy, entropy and heat capacity over all
   q-points and temperatures into thermal_props[num_temp * 3].
   Modes with f <= cutoff_frequency or T <= 0 are skipped. */
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args)
{
  PyArrayObject* py_thermal_props;
  PyArrayObject* py_temperatures;
  PyArrayObject* py_frequencies;
  PyArrayObject* py_weights;
  double cutoff_frequency;

  double *temperatures;
  double* freqs;
  double *thermal_props;
  int* w;
  int num_qpoints;
  int num_bands;
  int num_temp;
  int i, j, k;
  long sum_weights;  /* NOTE(review): unused in this function */
  double f;
  double *tp;  /* per-qpoint partial sums: tp[num_qpoints][num_temp][3] */

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &py_thermal_props,
                        &py_temperatures,
                        &py_frequencies,
                        &py_weights,
                        &cutoff_frequency)) {
    return NULL;
  }

  thermal_props = (double*)PyArray_DATA(py_thermal_props);
  temperatures = (double*)PyArray_DATA(py_temperatures);
  num_temp = PyArray_DIMS(py_temperatures)[0];
  freqs = (double*)PyArray_DATA(py_frequencies);
  num_qpoints = PyArray_DIMS(py_frequencies)[0];
  w = (int*)PyArray_DATA(py_weights);
  num_bands = PyArray_DIMS(py_frequencies)[1];

  /* Scratch buffer so each OpenMP iteration writes a disjoint slice. */
  tp = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3);
  for (i = 0; i < num_qpoints * num_temp * 3; i++) {
    tp[i] = 0;
  }

#pragma omp parallel for private(j, k, f)
  for (i = 0; i < num_qpoints; i++){
    for (j = 0; j < num_temp; j++) {
      for (k = 0; k < num_bands; k++){
        f = freqs[i * num_bands + k];
        if (temperatures[j] > 0 && f > cutoff_frequency) {
          tp[i * num_temp * 3 + j * 3] +=
            get_free_energy(temperatures[j], f) * w[i];
          tp[i * num_temp * 3 + j * 3 + 1] +=
            get_entropy(temperatures[j], f) * w[i];
          tp[i * num_temp * 3 + j * 3 + 2] +=
            get_heat_capacity(temperatures[j], f) * w[i];
        }
      }
    }
  }

  /* Serial reduction over q-points into the caller's output array. */
  for (i = 0; i < num_qpoints; i++) {
    for (j = 0; j < num_temp * 3; j++) {
      thermal_props[j] += tp[i * num_temp * 3 + j];
    }
  }

  free(tp);
  tp = NULL;

  Py_RETURN_NONE;
}

/* Python wrapper for distribute_fc2: validates array shapes then delegates. */
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args)
{
  PyArrayObject* py_force_constants;
  PyArrayObject* py_permutations;
  PyArrayObject* py_map_atoms;
  PyArrayObject* py_map_syms;
  PyArrayObject* py_atom_list;
  PyArrayObject* py_rotations_cart;

  double (*r_carts)[3][3];
  double (*fc2)[3][3];
  int *permutations;
  int *map_atoms;
  int *map_syms;
  int *atom_list;
  npy_intp num_pos, num_rot, len_atom_list;

  if (!PyArg_ParseTuple(args, "OOOOOO",
                        &py_force_constants,
                        &py_atom_list,
                        &py_rotations_cart,
                        &py_permutations,
                        &py_map_atoms,
                        &py_map_syms)) {
    return NULL;
  }

  fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants);
  atom_list = (int*)PyArray_DATA(py_atom_list);
  len_atom_list = PyArray_DIMS(py_atom_list)[0];
  permutations = (int*)PyArray_DATA(py_permutations);
  map_atoms = (int*)PyArray_DATA(py_map_atoms);
  map_syms = (int*)PyArray_DATA(py_map_syms);
  r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart);
  num_rot = PyArray_DIMS(py_permutations)[0];
  num_pos = PyArray_DIMS(py_permutations)[1];

  /* Shape checks: mappings must be 1-D of length num_pos; one rotation
     per permutation row. */
  if (PyArray_NDIM(py_map_atoms) != 1 ||
      PyArray_DIMS(py_map_atoms)[0] != num_pos) {
    PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms");
    return NULL;
  }

  if (PyArray_NDIM(py_map_syms) != 1 ||
      PyArray_DIMS(py_map_syms)[0] != num_pos) {
    PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms");
    return NULL;
  }

  if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) {
    PyErr_SetString(PyExc_ValueError,
                    "permutations and rotations are different length");
    return NULL;
  }

  distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations,
                 map_atoms, map_syms, num_rot, num_pos);

  Py_RETURN_NONE;
}

/* Wrapper for thm_get_neighboring_grid_points (BZ grid neighbors). */
static PyObject *py_thm_neighboring_grid_points(PyObject *self, PyObject *args)
{
  PyArrayObject* py_relative_grid_points;
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_mesh;
  PyArrayObject* py_bz_grid_address;
  PyArrayObject* py_bz_map;
  int grid_point;

  int* relative_grid_points;
  int (*relative_grid_address)[3];
  int num_relative_grid_address;
  int *mesh;
  int (*bz_grid_address)[3];
  int *bz_map;

  if (!PyArg_ParseTuple(args, "OiOOOO",
                        &py_relative_grid_points,
                        &grid_point,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }

  relative_grid_points = (int*)PyArray_DATA(py_relative_grid_points);
  relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
  num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0];
  mesh = (int*)PyArray_DATA(py_mesh);
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (int*)PyArray_DATA(py_bz_map);

  thm_get_neighboring_grid_points(relative_grid_points,
                                  grid_point,
                                  relative_grid_address,
                                  num_relative_grid_address,
                                  mesh,
                                  bz_grid_address,
                                  bz_map);
  Py_RETURN_NONE;
}

/* Wrapper for thm_get_relative_grid_address (24 tetrahedra x 4 vertices). */
static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args)
{
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_reciprocal_lattice_py;

  int (*relative_grid_address)[4][3];
  double (*reciprocal_lattice)[3];

  if (!PyArg_ParseTuple(args, "OO",
                        &py_relative_grid_address,
                        &py_reciprocal_lattice_py)) {
    return NULL;
  }

  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py);

  thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice);

  Py_RETURN_NONE;
}

/* Wrapper for thm_get_all_relative_grid_address (all 4 main-diagonal sets). */
static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args)
{
  PyArrayObject* py_relative_grid_address;

  int (*relative_grid_address)[24][4][3];

  if (!PyArg_ParseTuple(args, "O",
                        &py_relative_grid_address)) {
    return NULL;
  }

  relative_grid_address = (int(*)[24][4][3])PyArray_DATA(py_relative_grid_address);

  thm_get_all_relative_grid_address(relative_grid_address);

  Py_RETURN_NONE;
}

/* Single tetrahedron-method integration weight at omega.
   function[0] selects the kernel (e.g. 'I' or 'J'). */
static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args)
{
  double omega;
  PyArrayObject* py_tetrahedra_omegas;
  char* function;

  double (*tetrahedra_omegas)[4];
  double iw;

  if (!PyArg_ParseTuple(args, "dOs",
                        &omega,
                        &py_tetrahedra_omegas,
                        &function)) {
    return NULL;
  }

  tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);

  iw = thm_get_integration_weight(omega, tetrahedra_omegas, function[0]);

  return PyFloat_FromDouble(iw);
}

/* Vectorized variant: integration weights for an array of omegas. */
static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args)
{
  PyArrayObject* py_integration_weights;
  PyArrayObject* py_omegas;
  PyArrayObject* py_tetrahedra_omegas;
  char* function;

  double *omegas;
  double *iw;
  int num_omegas;
  double (*tetrahedra_omegas)[4];

  if (!PyArg_ParseTuple(args, "OOOs",
                        &py_integration_weights,
                        &py_omegas,
                        &py_tetrahedra_omegas,
                        &function)) {
    return NULL;
  }

  omegas = (double*)PyArray_DATA(py_omegas);
  iw = (double*)PyArray_DATA(py_integration_weights);
  num_omegas = (int)PyArray_DIMS(py_omegas)[0];
  tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);

  thm_get_integration_weight_at_omegas(iw,
                                       num_omegas,
                                       omegas,
                                       tetrahedra_omegas,
                                       function[0]);

  Py_RETURN_NONE;
}

/* Gather frequencies at the 96 tetrahedron vertex grid points around each
   input grid point (24 tetrahedra x 4 vertices = 96 relative addresses). */
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args)
{
  PyArrayObject* py_freq_tetras;
  PyArrayObject* py_grid_points;
  PyArrayObject* py_mesh;
  PyArrayObject* py_grid_address;
  PyArrayObject* py_gp_ir_index;
  PyArrayObject* py_relative_grid_address;
  PyArrayObject* py_frequencies;

  double* freq_tetras;
  int* grid_points;
  int num_gp_in;
  int* mesh;
  int (*grid_address)[3];
  int* gp_ir_index;
  int (*relative_grid_address)[3];
  double* frequencies;
  int num_band;
  int is_shift[3] = {0, 0, 0};
  int i, j, k, gp;
  int g_addr[3];
  int address_double[3];

  if (!PyArg_ParseTuple(args, "OOOOOOO",
                        &py_freq_tetras,
                        &py_grid_points,
                        &py_mesh,
                        &py_grid_address,
                        &py_gp_ir_index,
                        &py_relative_grid_address,
                        &py_frequencies)) {
    return NULL;
  }

  freq_tetras = (double*)PyArray_DATA(py_freq_tetras);
  grid_points = (int*)PyArray_DATA(py_grid_points);
  num_gp_in = (int)PyArray_DIMS(py_grid_points)[0];
  mesh = (int*)PyArray_DATA(py_mesh);
  grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
  gp_ir_index = (int*)PyArray_DATA(py_gp_ir_index);
  relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_band = (int)PyArray_DIMS(py_frequencies)[1];

  for (i = 0; i < num_gp_in; i++) {
#pragma omp parallel for private(k, g_addr, gp, address_double)
    /* j runs over (band, vertex) pairs: band = j / 96, vertex = j % 96. */
    for (j = 0; j < num_band * 96; j++) {
      for (k = 0; k < 3; k++) {
        g_addr[k] = grid_address[grid_points[i]][k] +
          relative_grid_address[j % 96][k];
      }
      kgd_get_grid_address_double_mesh(address_double,
                                       g_addr,
                                       mesh,
                                       is_shift);
      gp = kgd_get_grid_point_double_mesh(address_double, mesh);
      freq_tetras[i * num_band * 96 + j] =
        frequencies[gp_ir_index[gp] * num_band + j / 96];
    }
  }

  Py_RETURN_NONE;
}

/* Tetrahedron-method DOS over the irreducible grid points.
   Rebuilds ir-point weights from the grid mapping table, then for each
   ir point builds the 24 tetrahedra and accumulates integration weights
   into dos (parallel over ir grid points). */
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dos;
  PyArrayObject* py_mesh;
  PyArrayObject* py_freq_points;
  PyArrayObject* py_frequencies;
  PyArrayObject* py_coef;
  PyArrayObject* py_grid_address;
  PyArrayObject* py_grid_mapping_table;
  PyArrayObject* py_relative_grid_address;

  double *dos;
  int* mesh;
  double* freq_points;
  int num_freq_points;
  double* frequencies;
  double* coef;
  int (*grid_address)[3];
  int num_gp;
  int num_ir_gp;
  int num_coef;
  int num_band;
  int* grid_mapping_table;
  int (*relative_grid_address)[4][3];
  int is_shift[3] = {0, 0, 0};
  int i, j, k, l, m, q, r, count;
  int g_addr[3];
  int ir_gps[24][4];
  double tetrahedra[24][4];
  int address_double[3];
  int *gp2ir, *ir_grid_points, *weights;
  double iw;

  gp2ir = NULL;
  ir_grid_points = NULL;
  weights = NULL;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_dos,
                        &py_mesh,
                        &py_freq_points,
                        &py_frequencies,
                        &py_coef,
                        &py_grid_address,
                        &py_grid_mapping_table,
                        &py_relative_grid_address)) {
    return NULL;
  }

  /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */
  dos = (double*)PyArray_DATA(py_dos);
  mesh = (int*)PyArray_DATA(py_mesh);
  freq_points = (double*)PyArray_DATA(py_freq_points);
  num_freq_points = (int)PyArray_DIMS(py_freq_points)[0];
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_ir_gp = (int)PyArray_DIMS(py_frequencies)[0];
  num_band = (int)PyArray_DIMS(py_frequencies)[1];
  coef = (double*)PyArray_DATA(py_coef);
  num_coef = (int)PyArray_DIMS(py_coef)[1];
  grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
  num_gp = (int)PyArray_DIMS(py_grid_address)[0];
  grid_mapping_table = (int*)PyArray_DATA(py_grid_mapping_table);
  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);

  gp2ir = (int*)malloc(sizeof(int) * num_gp);
  ir_grid_points = (int*)malloc(sizeof(int) * num_ir_gp);
  weights = (int*)malloc(sizeof(int) * num_ir_gp);

  /* A grid point mapping to itself is irreducible; others add weight to
     their representative. */
  count = 0;
  for (i = 0; i < num_gp; i++) {
    if (grid_mapping_table[i] == i) {
      gp2ir[i] = count;
      ir_grid_points[count] = i;
      weights[count] = 1;
      count++;
    } else {
      gp2ir[i] = gp2ir[grid_mapping_table[i]];
      weights[gp2ir[i]]++;
    }
  }

  if (num_ir_gp != count) {
    printf("Something is wrong!\n");
  }

#pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double)
  for (i = 0; i < num_ir_gp; i++) {
    /* set 24 tetrahedra */
    for (l = 0; l < 24; l++) {
      for (q = 0; q < 4; q++) {
        for (r = 0; r < 3; r++) {
          g_addr[r] = grid_address[ir_grid_points[i]][r] +
            relative_grid_address[l][q][r];
        }
        kgd_get_grid_address_double_mesh(address_double,
                                         g_addr,
                                         mesh,
                                         is_shift);
        ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)];
      }
    }

    for (k = 0; k < num_band; k++) {
      for (l = 0; l < 24; l++) {
        for (q = 0; q < 4; q++) {
          tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k];
        }
      }
      for (j = 0; j < num_freq_points; j++) {
        iw = thm_get_integration_weight(freq_points[j], tetrahedra, 'I') * weights[i];
        for (m = 0; m < num_coef; m++) {
          dos[i * num_band * num_freq_points * num_coef +
              k * num_coef * num_freq_points + j * num_coef + m] +=
            iw * coef[i * num_coef * num_band + m * num_band + k];
        }
      }
    }
  }

  free(gp2ir);
  gp2ir = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;
  free(weights);
  weights = NULL;

  Py_RETURN_NONE;
}

/* Harmonic phonon free energy of one mode at temperature T. */
static double get_free_energy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  return KB * temperature * log(1 - exp(- f / (KB * temperature)));
}

/* Harmonic phonon entropy of one mode at temperature T. */
static double get_entropy(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  double val;

  val = f / (2 * KB * temperature);
  return 1 / (2 * temperature) * f * cosh(val) / sinh(val) - KB * log(2 * sinh(val));
}

/* Harmonic mode heat capacity (Einstein term) at temperature T. */
static double get_heat_capacity(const double temperature, const double f)
{
  /* temperature is defined by T (K) */
  /* 'f' must be given in eV. */
  /* If val is close to 1. Then expansion is used. */
  double val, val1, val2;

  val = f / (KB * temperature);
  val1 = exp(val);
  val2 = (val) / (val1 - 1);
  return KB * val1 * val2 * val2;
}

/* static double get_energy(double temperature, double f){ */
/*   /\* temperature is defined by T (K) *\/ */
/*   /\* 'f' must be given in eV. *\/ */
/*   return f / (exp(f / (KB * temperature)) - 1); */
/* } */

/* Find, for each rotated position, the index of the matching original
   position (within symprec, Cartesian distance under 'lat').
   Returns 1 on success, 0 if any position could not be matched. */
static int compute_permutation(int * rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec)
{
  int i,j,k,l;
  int search_start;
  double distance2, diff_cart;
  double diff[3];

  for (i = 0; i < num_pos; i++) {
    rot_atom[i] = -1;
  }

  /* optimization: Iterate primarily by pos instead of rot_pos. */
  /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */
  /* Then track the first unassigned index. */
  /* */
  /* This works best if the permutation is close to the identity. */
  /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */
  search_start = 0;
  for (i = 0; i < num_pos; i++) {
    while (rot_atom[search_start] >= 0) {
      search_start++;
    }
    for (j = search_start; j < num_pos; j++) {
      if (rot_atom[j] >= 0) {
        continue;
      }
      /* fractional difference folded to the nearest image */
      for (k = 0; k < 3; k++) {
        diff[k] = pos[i][k] - rot_pos[j][k];
        diff[k] -= nint(diff[k]);
      }
      distance2 = 0;
      for (k = 0; k < 3; k++) {
        diff_cart = 0;
        for (l = 0; l < 3; l++) {
          diff_cart += lat[k][l] * diff[l];
        }
        distance2 += diff_cart * diff_cart;
      }
      if (sqrt(distance2) < symprec) {
        rot_atom[j] = i;
        break;
      }
    }
  }

  for (i = 0; i < num_pos; i++) {
    if (rot_atom[i] < 0) {
      printf("Encounter some problem in compute_permutation.\n");
      return 0;
    }
  }
  return 1;
}

/* Implementation detail of get_smallest_vectors. */
/* Finds the smallest vectors within each list and copies them to the output. */
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int * multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec)
{
  int i,j,k;
  int count;
  double minimum;
  double (*vectors)[3];
  double * lengths;

  for (i = 0; i < num_lists; i++) {
    /* Look at a single list of 27 vectors. */
    lengths = length_lists[i];
    vectors = vector_lists[i];

    /* Compute the minimum length. */
    minimum = DBL_MAX;
    for (j = 0; j < 27; j++) {
      if (lengths[j] < minimum) {
        minimum = lengths[j];
      }
    }

    /* Copy vectors whose length is within tolerance. */
    count = 0;
    for (j = 0; j < 27; j++) {
      if (lengths[j] - minimum <= symprec) {
        for (k = 0; k < 3; k++) {
          shortest_vectors[i][count][k] = vectors[j][k];
        }
        count++;
      }
    }
    multiplicity[i] = count;
  }
}

/* For every (pos_to, pos_from) pair, compute the 27 lattice-image vectors,
   keep those of (near-)minimal length in the reduced basis, and store them
   in supercell coordinates via trans_mat. */
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
                                     int *multiplicity,
                                     PHPYCONST double (*pos_to)[3],
                                     const int num_pos_to,
                                     PHPYCONST double (*pos_from)[3],
                                     const int num_pos_from,
                                     PHPYCONST int lattice_points[27][3],
                                     PHPYCONST double reduced_basis[3][3],
                                     PHPYCONST int trans_mat[3][3],
                                     const double symprec)
{
  int i, j, k, l, count;
  double length_tmp, minimum, vec_xyz;
  double length[27], vec[27][3];

  for (i = 0; i < num_pos_to; i++) {
    for (j = 0; j < num_pos_from; j++) {
      for (k = 0; k < 27; k++) {
        length[k] = 0;
        for (l = 0; l < 3; l++) {
          vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
        }
        for (l = 0; l < 3; l++) {
          length_tmp = (reduced_basis[l][0] * vec[k][0] +
                        reduced_basis[l][1] * vec[k][1] +
                        reduced_basis[l][2] * vec[k][2]);
          length[k] += length_tmp * length_tmp;
        }
        length[k] = sqrt(length[k]);
      }

      minimum = DBL_MAX;
      for (k = 0; k < 27; k++) {
        if (length[k] < minimum) {
          minimum = length[k];
        }
      }

      count = 0;
      for (k = 0; k < 27; k++) {
        if (length[k] - minimum < symprec) {
          for (l = 0; l < 3; l++) {
            /* Transform to supercell coordinates */
            vec_xyz = (trans_mat[l][0] * vec[k][0] +
                       trans_mat[l][1] * vec[k][1] +
                       trans_mat[l][2] * vec[k][2]);
            smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz;
          }
          count++;
        }
      }
      multiplicity[i * num_pos_from + j] = count;
    }
  }
}

/* Distribute fc2 rows of symmetry-independent atoms to the remaining atoms
   in atom_list using rotations (P' = R^-1 P R) and atom permutations. */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int * atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int * permutations, /* shape[n_rot][n_pos] */
                           const int * map_atoms, /* shape [n_pos] */
                           const int * map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
  int i, j, k, l, m;
  int atom_todo, atom_done, atom_other;
  int sym_index;
  int *atom_list_reverse;
  double (*fc2_done)[3];
  double (*fc2_todo)[3];
  double (*r_cart)[3];
  const int * permutation;

  atom_list_reverse = NULL;
  atom_list_reverse = (int*)malloc(sizeof(int) * num_pos);
  /* atom_list_reverse[!atom_done] is undefined. */
  for (i = 0; i < len_atom_list; i++) {
    atom_done = map_atoms[atom_list[i]];
    if (atom_done == atom_list[i]) {
      atom_list_reverse[atom_done] = i;
    }
  }

  for (i = 0; i < len_atom_list; i++) {
    /* look up how this atom maps into the done list. */
    atom_todo = atom_list[i];
    atom_done = map_atoms[atom_todo];
    sym_index = map_syms[atom_todo];

    /* skip the atoms in the done list, */
    /* which are easily identified because they map to themselves. */
    if (atom_todo == atom_done) {
      continue;
    }

    /* look up information about the rotation */
    r_cart = r_carts[sym_index];
    permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

    /* distribute terms from atom_done to atom_todo */
    for (atom_other = 0; atom_other < num_pos; atom_other++) {
      fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]];
      fc2_todo = fc2[i * num_pos + atom_other];
      for (j = 0; j < 3; j++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            for (m = 0; m < 3; m++) {
              /* P' = R^-1 P R */
              fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
            }
          }
        }
      }
    }
  }

  free(atom_list_reverse);
  atom_list_reverse = NULL;
}

/* Symmetrize fc against index permutation: fc[i,j] = fc[j,i]^T (averaged). */
static void set_index_permutation_symmetry_fc(double * fc, const int natom)
{
  int i, j, k, l, m, n;

  for (i = 0; i < natom; i++) {
    /* non diagonal part */
    for (j = i + 1; j < natom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          m = i * natom * 9 + j * 9 + k * 3 + l;
          n = j * natom * 9 + i * 9 + l * 3 + k;
          fc[m] += fc[n];
          fc[m] /= 2;
          fc[n] = fc[m];
        }
      }
    }

    /* diagonal part */
    for (k = 0; k < 2; k++) {
      for (l = k + 1; l < 3; l++) {
        m = i * natom * 9 + i * 9 + k * 3 + l;
        n = i * natom * 9 + i * 9 + l * 3 + k;
        fc[m] += fc[n];
        fc[m] /= 2;
        fc[n] = fc[m];
      }
    }
  }
}

/* Impose the acoustic sum rule: each diagonal block is set so every row of
   blocks sums to zero (symmetrized with its transpose). */
static void set_translational_symmetry_fc(double * fc, const int natom)
{
  int i, j, k, l, m;
  double sums[3][3];

  for (i = 0; i < natom; i++) {
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i * natom * 9 + k * 3 + l;
        for (j = 0; j < natom; j++) {
          if (i != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i * natom * 9 + i * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Index-permutation symmetry for compact fc.
   is_transpose != 0: swap paired elements only; otherwise average them. */
static void set_index_permutation_symmetry_compact_fc(double * fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose)
{
  int i, j, k, l, m, n, i_p, j_p, i_trans;
  double fc_elem;
  char *done;

  done = NULL;
  done = (char*)malloc(sizeof(char) * n_satom * n_patom);
  for (i = 0; i < n_satom * n_patom; i++) {
    done[i] = 0;
  }

  for (j = 0; j < n_satom; j++) {
    j_p = s2pp[j];
    for (i_p = 0; i_p < n_patom; i_p++) {
      i = p2s[i_p];
      if (i == j) { /* diagonal part */
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            if (l > k) {
              m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
              n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
              if (is_transpose) {
                fc_elem = fc[m];
                fc[m] = fc[n];
                fc[n] = fc_elem;
              } else {
                fc[m] = (fc[m] + fc[n]) / 2;
                fc[n] = fc[m];
              }
            }
          }
        }
      }

      if (!done[i_p * n_satom + j]) {
        /* (j, i) -- nsym_list[j] --> (j', i') */
        /* nsym_list[j] translates j to j' where j' is in */
        /* primitive cell. The same translation sends i to i' */
        /* where i' is not necessarily to be in primitive cell. */
        /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
        i_trans = perms[nsym_list[j] * n_satom + i];
        done[i_p * n_satom + j] = 1;
        done[j_p * n_satom + i_trans] = 1;
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
            n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
            if (is_transpose) {
              fc_elem = fc[m];
              fc[m] = fc[n];
              fc[n] = fc_elem;
            } else {
              fc[m] = (fc[n] + fc[m]) / 2;
              fc[n] = fc[m];
            }
          }
        }
      }
    }
  }

  free(done);
  done = NULL;
}

/* Acoustic sum rule for compact fc (diagonal blocks addressed via p2s). */
static void set_translational_symmetry_compact_fc(double * fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom)
{
  int j, k, l, m, i_p;
  double sums[3][3];

  for (i_p = 0; i_p < n_patom; i_p++) {
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        sums[k][l] = 0;
        m = i_p * n_satom * 9 + k * 3 + l;
        for (j = 0; j < n_satom; j++) {
          if (p2s[i_p] != j) {
            sums[k][l] += fc[m];
          }
          m += 9;
        }
      }
    }
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
          -(sums[k][l] + sums[l][k]) / 2;
      }
    }
  }
}

/* Round to nearest integer (ties away from zero). */
static int nint(const double a)
{
  if (a < 0.0)
    return (int) (a - 0.5);
  else
    return (int) (a + 0.5);
}
/* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <Python.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <numpy/arrayobject.h>
#include <dynmat.h>
#include <derivative_dynmat.h>
#include <kgrid.h>
#include <tetrahedron_method.h>

/* Boltzmann constant in eV/K. */
#define KB 8.6173382568083159E-05

/* PHPYCONST is defined in dynmat.h */

/* Build dynamical matrix */
static PyObject *py_transform_dynmat_to_fc(PyObject * self, PyObject * args);
static PyObject *py_perm_trans_symmetrize_fc(PyObject * self, PyObject * args);
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject * self, PyObject * args);
static PyObject *py_transpose_compact_fc(PyObject * self, PyObject * args);
static PyObject *py_get_dynamical_matrix(PyObject * self, PyObject * args);
static PyObject *py_get_nac_dynamical_matrix(PyObject * self, PyObject * args);
static PyObject *py_get_dipole_dipole(PyObject * self, PyObject * args);
static PyObject *py_get_dipole_dipole_q0(PyObject * self, PyObject * args);
static PyObject *py_get_derivative_dynmat(PyObject * self, PyObject * args);
static PyObject *py_get_thermal_properties(PyObject * self, PyObject * args);
static PyObject *py_distribute_fc2(PyObject * self, PyObject * args);
static PyObject *py_compute_permutation(PyObject * self, PyObject * args);
static PyObject *py_gsv_copy_smallest_vectors(PyObject * self, PyObject * args);
static PyObject *py_gsv_set_smallest_vectors(PyObject * self, PyObject * args);
static PyObject * py_thm_neighboring_grid_points(PyObject * self, PyObject * args);
static PyObject * py_thm_relative_grid_address(PyObject * self, PyObject * args);
static PyObject * py_thm_all_relative_grid_address(PyObject * self, PyObject * args);
static PyObject * py_thm_integration_weight(PyObject * self, PyObject * args);
static PyObject * py_thm_integration_weight_at_omegas(PyObject * self, PyObject * args);
static PyObject *py_get_tetrahedra_frequenies(PyObject * self, PyObject * args);
static PyObject *py_tetrahedron_method_dos(PyObject * self, PyObject * args);

static void distribute_fc2(double (*fc2)[3][3],
                           const int *atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3],
                           const int *permutations,
                           const int *map_atoms,
                           const int *map_syms,
                           const int num_rot,
                           const int num_pos);
static int compute_permutation(int *rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec);
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int *multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec);
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
                                     int *multiplicity,
                                     PHPYCONST double (*pos_to)[3],
                                     const int num_pos_to,
                                     PHPYCONST double (*pos_from)[3],
                                     const int num_pos_from,
                                     PHPYCONST int lattice_points[27][3],
                                     PHPYCONST double reduced_basis[3][3],
                                     PHPYCONST int trans_mat[3][3],
                                     const double symprec);
static double get_free_energy(const double temperature, const double f);
static double get_entropy(const double temperature, const double f);
static double get_heat_capacity(const double temperature, const double f);
static void set_index_permutation_symmetry_fc(double *fc, const int natom);
static void set_translational_symmetry_fc(double *fc, const int natom);
static void set_index_permutation_symmetry_compact_fc(double *fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose);
static void set_translational_symmetry_compact_fc(double *fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom);
/* static double get_energy(double temperature, double f); */
static int nint(const double a);

/* Per-module state (Python 3 multi-phase style): the module's exception. */
struct module_state {
  PyObject *error;
};

#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif

/* Test hook: always raises the module's error. */
static PyObject * error_out(PyObject * m)
{
  struct module_state *st = GETSTATE(m);
  PyErr_SetString(st->error, "something bad happened");
  return NULL;
}

/* Method table mapping Python-visible names to C wrappers. */
static PyMethodDef _phonopy_methods[] = {
  {"error_out", (PyCFunction) error_out, METH_NOARGS, NULL},
  {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS,
   "Transform a set of dynmat to force constants"},
  {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS,
   "Enforce permutation and translational symmetry of force constants"},
  {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc,
   METH_VARARGS,
   "Enforce permutation and translational symmetry of compact force constants"},
  {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS,
   "Transpose compact force constants"},
  {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS,
   "Dynamical matrix"},
  {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS,
   "NAC dynamical matrix"},
  {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS,
   "Dipole-dipole interaction"},
  {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS,
   "q=0 terms of Dipole-dipole interaction"},
  {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS,
   "Q derivative of dynamical matrix"},
  {"thermal_properties", py_get_thermal_properties, METH_VARARGS,
   "Thermal properties"},
  {"distribute_fc2", py_distribute_fc2, METH_VARARGS,
   "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."},
  {"compute_permutation", py_compute_permutation, METH_VARARGS,
   "Compute indices of original points in a set of rotated points."},
  {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS,
   "Implementation detail of get_smallest_vectors."},
  {"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS,
   "Set candidate vectors."},
  {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS,
   "Neighboring grid points by relative grid addresses"},
  {"tetrahedra_relative_grid_address", py_thm_relative_grid_address,
   METH_VARARGS,
   "Relative grid addresses of vertices of 24 tetrahedra"},
  {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address,
   METH_VARARGS,
   "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"},
  {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS,
   "Integration weight for tetrahedron method"},
  {"tetrahedra_integration_weight_at_omegas",
   py_thm_integration_weight_at_omegas, METH_VARARGS,
   "Integration weight for tetrahedron method at omegas"},
  {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS,
   "Run tetrahedron method"},
  {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS,
   "Run tetrahedron method"},
  {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3

static int _phonopy_traverse(PyObject * m, visitproc visit, void *arg)
{
  Py_VISIT(GETSTATE(m)->error);
  return 0;
}

static int _phonopy_clear(PyObject * m)
{
  Py_CLEAR(GETSTATE(m)->error);
  return 0;
}

static struct PyModuleDef moduledef = {
  PyModuleDef_HEAD_INIT,
  "_phonopy",
  NULL,
  sizeof(struct module_state),
  _phonopy_methods,
  NULL,
  _phonopy_traverse,
  _phonopy_clear,
  NULL
};

#define INITERROR return NULL

PyObject * PyInit__phonopy(void)

#else
#define INITERROR return

void init_phonopy(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
  PyObject *module = PyModule_Create(&moduledef);
#else
  PyObject *module = Py_InitModule("_phonopy", _phonopy_methods);
#endif
  struct module_state *st;

  if (module == NULL)
    INITERROR;
  st = GETSTATE(module);

  st->error = PyErr_NewException("_phonopy.Error", NULL, NULL);
  if (st->error == NULL) {
    Py_DECREF(module);
    INITERROR;
  }

#if PY_MAJOR_VERSION >= 3
  return module;
#endif
}

/* Back-transform dynamical matrices at commensurate points to fc2. */
static PyObject * py_transform_dynmat_to_fc(PyObject * self, PyObject * args)
{
  PyArrayObject *py_force_constants;
  PyArrayObject *py_dynamical_matrices;
  PyArrayObject *py_commensurate_points;
  PyArrayObject *py_shortest_vectors;
  PyArrayObject *py_multiplicities;
  PyArrayObject *py_masses;
  PyArrayObject *py_s2pp_map;
  PyArrayObject *py_fc_index_map;

  double *fc;
  double *dm;
  double (*comm_points)[3];
  double (*shortest_vectors)[27][3];
  double *masses;
  int *multiplicities;
  int *s2pp_map;
  int *fc_index_map;
  int num_patom;
  int num_satom;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_force_constants,
                        &py_dynamical_matrices,
                        &py_commensurate_points,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2pp_map,
                        &py_fc_index_map)) {
    return NULL;
  }

  fc = (double *)PyArray_DATA(py_force_constants);
  dm = (double *)PyArray_DATA(py_dynamical_matrices);
  comm_points = (double (*)[3])PyArray_DATA(py_commensurate_points);
  shortest_vectors = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
  masses = (double *)PyArray_DATA(py_masses);
  multiplicities = (int *)PyArray_DATA(py_multiplicities);
  s2pp_map = (int *)PyArray_DATA(py_s2pp_map);
  fc_index_map = (int *)PyArray_DATA(py_fc_index_map);
  num_patom = PyArray_DIMS(py_multiplicities)[1];
  num_satom = PyArray_DIMS(py_multiplicities)[0];

  dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors,
                             multiplicities, masses, s2pp_map, fc_index_map,
                             num_patom, num_satom);

  Py_RETURN_NONE;
}

/* Wrapper for compute_permutation; returns 1 on success, 0 on failure. */
static PyObject * py_compute_permutation(PyObject * self, PyObject * args)
{
  PyArrayObject *permutation;
  PyArrayObject *lattice;
  PyArrayObject *positions;
  PyArrayObject *permuted_positions;
  double symprec;

  int *rot_atoms;
  double (*lat)[3];
  double (*pos)[3];
  double (*rot_pos)[3];
  int num_pos;
  int is_found;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &permutation,
                        &lattice,
                        &positions,
                        &permuted_positions,
                        &symprec)) {
    return NULL;
  }

  rot_atoms = (int *)PyArray_DATA(permutation);
  lat = (double (*)[3])PyArray_DATA(lattice);
  pos = (double (*)[3])PyArray_DATA(positions);
  rot_pos = (double (*)[3])PyArray_DATA(permuted_positions);
  num_pos = PyArray_DIMS(positions)[0];

  is_found = compute_permutation(rot_atoms, lat, pos, rot_pos, num_pos,
                                 symprec);

  return Py_BuildValue("i", is_found);
}

/* Wrapper for gsv_copy_smallest_vectors. */
static PyObject * py_gsv_copy_smallest_vectors(PyObject * self, PyObject * args)
{
  PyArrayObject *py_shortest_vectors;
  PyArrayObject *py_multiplicity;
  PyArrayObject *py_vectors;
  PyArrayObject *py_lengths;
  double symprec;

  double (*shortest_vectors)[27][3];
  double (*vectors)[27][3];
  double (*lengths)[27];
  int *multiplicity;
  int size_super, size_prim;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &py_shortest_vectors,
                        &py_multiplicity,
                        &py_vectors,
                        &py_lengths,
                        &symprec)) {
    return NULL;
  }

  shortest_vectors = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
  multiplicity = (int *)PyArray_DATA(py_multiplicity);
  vectors = (double (*)[27][3])PyArray_DATA(py_vectors);
  lengths = (double (*)[27])PyArray_DATA(py_lengths);
  size_super = PyArray_DIMS(py_vectors)[0];
  size_prim = PyArray_DIMS(py_vectors)[1];

  gsv_copy_smallest_vectors(shortest_vectors, multiplicity,
                            vectors, lengths,
                            size_super * size_prim, symprec);

  Py_RETURN_NONE;
}

/* Wrapper for gsv_set_smallest_vectors. */
static PyObject * py_gsv_set_smallest_vectors(PyObject * self, PyObject * args)
{
  PyArrayObject *py_smallest_vectors;
  PyArrayObject *py_multiplicity;
  PyArrayObject *py_pos_to;
  PyArrayObject *py_pos_from;
  PyArrayObject *py_lattice_points;
  PyArrayObject *py_reduced_basis;
  PyArrayObject *py_trans_mat;
  double symprec;

  double (*smallest_vectors)[27][3];
  int *multiplicity;
  double (*pos_to)[3];
  double (*pos_from)[3];
  int (*lattice_points)[3];
  double (*reduced_basis)[3];
  int (*trans_mat)[3];
  int num_pos_to, num_pos_from;

  if (!PyArg_ParseTuple(args, "OOOOOOOd",
                        &py_smallest_vectors,
                        &py_multiplicity,
                        &py_pos_to,
                        &py_pos_from,
                        &py_lattice_points,
                        &py_reduced_basis,
                        &py_trans_mat,
                        &symprec)) {
    return NULL;
  }

  smallest_vectors = (double (*)[27][3])PyArray_DATA(py_smallest_vectors);
  multiplicity = (int *)PyArray_DATA(py_multiplicity);
  pos_to = (double (*)[3])PyArray_DATA(py_pos_to);
  pos_from = (double (*)[3])PyArray_DATA(py_pos_from);
  num_pos_to = PyArray_DIMS(py_pos_to)[0];
  num_pos_from = PyArray_DIMS(py_pos_from)[0];
  lattice_points = (int (*)[3])PyArray_DATA(py_lattice_points);
  reduced_basis = (double (*)[3])PyArray_DATA(py_reduced_basis);
  trans_mat = (int (*)[3])PyArray_DATA(py_trans_mat);

  gsv_set_smallest_vectors(smallest_vectors,
                           multiplicity,
                           pos_to,
                           num_pos_to,
                           pos_from,
                           num_pos_from,
                           lattice_points,
                           reduced_basis,
                           trans_mat,
                           symprec);

  Py_RETURN_NONE;
}

/* Iteratively remove drift along rows and columns and impose permutation
   symmetry ('level' passes), then the acoustic sum rule. */
static PyObject * py_perm_trans_symmetrize_fc(PyObject * self, PyObject * args)
{
  PyArrayObject *force_constants;
  double *fc;
  int level;

  int n_satom, i, j, k, l, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) {
    return NULL;
  }

  fc = (double *)PyArray_DATA(force_constants);
  n_satom = PyArray_DIMS(force_constants)[0];

  for (iter = 0; iter < level; iter++) {
    /* Subtract drift along column */
    for (j = 0; j < n_satom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (i = 0; i < n_satom; i++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (i = 0; i < n_satom; i++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }
    /* Subtract drift along row */
    for (i = 0; i < n_satom; i++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (j = 0; j < n_satom; j++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (j = 0; j < n_satom; j++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }
    set_index_permutation_symmetry_fc(fc, n_satom);
  }
  set_translational_symmetry_fc(fc, n_satom);

  Py_RETURN_NONE;
}

/* Compact-fc variant of the above, using symmetry mapping tables. */
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject * self, PyObject * args)
{
  PyArrayObject *py_fc;
  PyArrayObject *py_permutations;
  PyArrayObject *py_s2pp_map;
  PyArrayObject *py_p2s_map;
  PyArrayObject *py_nsym_list;
  int level;

  double *fc;
  int *perms;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int n_patom, n_satom, i, j, k, l, n, iter;
  double sum;

  if (!PyArg_ParseTuple(args, "OOOOOi",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list,
                        &level)) {
    return NULL;
  }

  fc = (double *)PyArray_DATA(py_fc);
  perms = (int *)PyArray_DATA(py_permutations);
  s2pp = (int *)PyArray_DATA(py_s2pp_map);
  p2s = (int *)PyArray_DATA(py_p2s_map);
  nsym_list = (int *)PyArray_DATA(py_nsym_list);
  n_patom = PyArray_DIMS(py_fc)[0];
  n_satom = PyArray_DIMS(py_fc)[1];

  for (iter = 0; iter < level; iter++) {
    for (n = 0; n < 2; n++) {
      /* transpose only */
      set_index_permutation_symmetry_compact_fc(fc,
                                                p2s,
                                                s2pp,
                                                nsym_list,
                                                perms,
                                                n_satom,
                                                n_patom,
                                                1);
      for (i = 0; i < n_patom; i++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            sum = 0;
            for (j = 0; j < n_satom; j++) {
              sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
            }
            sum /= n_satom;
            for (j = 0; j < n_satom; j++) {
              fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
            }
          }
        }
      }
    }
    set_index_permutation_symmetry_compact_fc(fc,
                                              p2s,
                                              s2pp,
                                              nsym_list,
                                              perms,
                                              n_satom,
                                              n_patom,
                                              0);
  }

  set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);

  Py_RETURN_NONE;
}

/* In-place transpose of a compact force-constant array. */
static PyObject * py_transpose_compact_fc(PyObject * self, PyObject * args)
{
  PyArrayObject *py_fc;
  PyArrayObject *py_permutations;
  PyArrayObject *py_s2pp_map;
  PyArrayObject *py_p2s_map;
  PyArrayObject *py_nsym_list;

  double *fc;
  int *s2pp;
  int *p2s;
  int *nsym_list;
  int *perms;
  int n_patom, n_satom;

  if (!PyArg_ParseTuple(args, "OOOOO",
                        &py_fc,
                        &py_permutations,
                        &py_s2pp_map,
                        &py_p2s_map,
                        &py_nsym_list)) {
    return NULL;
  }

  fc = (double *)PyArray_DATA(py_fc);
  perms = (int *)PyArray_DATA(py_permutations);
  s2pp = (int *)PyArray_DATA(py_s2pp_map);
  p2s = (int *)PyArray_DATA(py_p2s_map);
  nsym_list = (int *)PyArray_DATA(py_nsym_list);
  n_patom = PyArray_DIMS(py_fc)[0];
  n_satom = PyArray_DIMS(py_fc)[1];

  set_index_permutation_symmetry_compact_fc(fc,
                                            p2s,
                                            s2pp,
                                            nsym_list,
                                            perms,
                                            n_satom,
                                            n_patom,
                                            1);
  Py_RETURN_NONE;
}

/* Dynamical matrix at q (no non-analytical correction). */
static PyObject * py_get_dynamical_matrix(PyObject * self, PyObject * args)
{
  PyArrayObject *py_dynamical_matrix;
  PyArrayObject *py_force_constants;
  PyArrayObject *py_shortest_vectors;
  PyArrayObject *py_q;
  PyArrayObject *py_multiplicities;
  PyArrayObject *py_masses;
  PyArrayObject *py_s2p_map;
  PyArrayObject *py_p2s_map;

  double *dm;
  double *fc;
  double *q;
  double (*svecs)[27][3];
  double *m;
  int *multi;
  int *s2p_map;
  int *p2s_map;
  int num_patom;
  int num_satom;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_dynamical_matrix,
                        &py_force_constants,
                        &py_q,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map)) {
    return NULL;
  }

  dm = (double *)PyArray_DATA(py_dynamical_matrix);
  fc = (double *)PyArray_DATA(py_force_constants);
  q = (double *)PyArray_DATA(py_q);
  svecs = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
  m = (double *)PyArray_DATA(py_masses);
  multi = (int *)PyArray_DATA(py_multiplicities);
  s2p_map = (int *)PyArray_DATA(py_s2p_map);
  p2s_map = (int *)PyArray_DATA(py_p2s_map);
  num_patom = PyArray_DIMS(py_p2s_map)[0];
  num_satom = PyArray_DIMS(py_s2p_map)[0];

  dym_get_dynamical_matrix_at_q(dm,
                                num_patom,
                                num_satom,
                                fc,
                                q,
                                svecs,
                                multi,
                                m,
                                s2p_map,
                                p2s_map,
                                NULL,
                                1);

  Py_RETURN_NONE;
}

/* Dynamical matrix with non-analytical correction (Born charges).
   NOTE(review): this function is truncated by the chunk boundary; the
   remainder of its body lies outside this view and is kept verbatim. */
static PyObject * py_get_nac_dynamical_matrix(PyObject * self, PyObject * args)
{
  PyArrayObject *py_dynamical_matrix;
  PyArrayObject *py_force_constants;
  PyArrayObject *py_shortest_vectors;
  PyArrayObject *py_q_cart;
  PyArrayObject *py_q;
  PyArrayObject *py_multiplicities;
  PyArrayObject *py_masses;
  PyArrayObject *py_s2p_map;
  PyArrayObject *py_p2s_map;
  PyArrayObject *py_born;
  double factor;

  double *dm;
  double *fc;
  double *q_cart;
  double *q;
  double (*svecs)[27][3];
  double *m;
  double (*born)[3][3];
  int *multi;
  int *s2p_map;
  int *p2s_map;
  int num_patom;
  int num_satom;

  int n;
  double (*charge_sum)[3][3];

  if (!PyArg_ParseTuple(args, "OOOOOOOOOOd",
                        &py_dynamical_matrix,
                        &py_force_constants,
                        &py_q,
                        &py_shortest_vectors,
                        &py_multiplicities,
                        &py_masses,
                        &py_s2p_map,
                        &py_p2s_map,
                        &py_q_cart,
                        &py_born,
                        &factor))
    return NULL;

  dm = (double *)PyArray_DATA(py_dynamical_matrix);
  fc = (double *)PyArray_DATA(py_force_constants);
  q_cart = (double *)PyArray_DATA(py_q_cart);
  q = (double *)PyArray_DATA(py_q);
  svecs = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
  m = (double *)PyArray_DATA(py_masses);
  born = (double (*)[3][3])PyArray_DATA(py_born);
  multi = (int *)PyArray_DATA(py_multiplicities);
  s2p_map = (int *)PyArray_DATA(py_s2p_map);
p2s_map = (int *)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double (*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject * py_get_dipole_dipole(PyObject * self, PyObject * args) { PyArrayObject *py_dd; PyArrayObject *py_dd_q0; PyArrayObject *py_G_list; PyArrayObject *py_q_cart; PyArrayObject *py_q_direction; PyArrayObject *py_born; PyArrayObject *py_dielectric; PyArrayObject *py_positions; double factor; double lambda; double tolerance; double *dd; double *dd_q0; double (*G_list)[3]; double *q_vector; double *q_direction; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double *)PyArray_DATA(py_dd); dd_q0 = (double *)PyArray_DATA(py_dd_q0); G_list = (double (*)[3])PyArray_DATA(py_G_list); if ((PyObject *) py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double *)PyArray_DATA(py_q_direction); } q_vector = (double *)PyArray_DATA(py_q_cart); born = (double (*)[3][3])PyArray_DATA(py_born); dielectric = (double (*)[3])PyArray_DATA(py_dielectric); pos = (double (*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0,/* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } 
/* q=0 part of the dipole-dipole interaction; written into dd_q0. */
static PyObject * py_get_dipole_dipole_q0(PyObject * self, PyObject * args)
{
    PyArrayObject *py_dd_q0;
    PyArrayObject *py_G_list;
    PyArrayObject *py_born;
    PyArrayObject *py_dielectric;
    PyArrayObject *py_positions;
    double lambda;
    double tolerance;

    double *dd_q0;
    double (*g_list)[3];
    double (*z_born)[3][3];
    double (*eps)[3];
    double (*positions)[3];
    int n_patom, n_gvec;

    if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born,
                          &py_dielectric, &py_positions, &lambda, &tolerance))
        return NULL;

    n_gvec = PyArray_DIMS(py_G_list)[0];
    n_patom = PyArray_DIMS(py_positions)[0];
    dd_q0 = (double *)PyArray_DATA(py_dd_q0);
    g_list = (double (*)[3])PyArray_DATA(py_G_list);
    z_born = (double (*)[3][3])PyArray_DATA(py_born);
    eps = (double (*)[3])PyArray_DATA(py_dielectric);
    positions = (double (*)[3])PyArray_DATA(py_positions);

    dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */
                             g_list, /* [num_kvec, 3] */
                             n_gvec,
                             n_patom,
                             z_born,
                             eps,
                             positions, /* [natom, 3] */
                             lambda, /* 4 * Lambda^2 */
                             tolerance);

    Py_RETURN_NONE;
}

/* q-derivative of the dynamical matrix; optional Born charges,
   dielectric tensor and q-direction may each be passed as None. */
static PyObject * py_get_derivative_dynmat(PyObject * self, PyObject * args)
{
    PyArrayObject *derivative_dynmat;
    PyArrayObject *py_force_constants;
    PyArrayObject *r_vector;
    PyArrayObject *lattice;
    PyArrayObject *q_vector;
    PyArrayObject *py_multiplicities;
    PyArrayObject *py_masses;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_born;
    PyArrayObject *dielectric;
    PyArrayObject *q_direction;
    double nac_factor;

    double *ddm;
    double *fc;
    double *q;
    double *lat;
    double *r;
    double *m;
    int *multi;
    int *s2p_map;
    int *p2s_map;
    int n_patom;
    int n_satom;
    double *z;
    double *epsilon;
    double *q_dir;

    if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &derivative_dynmat,
                          &py_force_constants, &q_vector,
                          &lattice, /* column vectors */
                          &r_vector, &py_multiplicities, &py_masses,
                          &py_s2p_map, &py_p2s_map, &nac_factor, &py_born,
                          &dielectric, &q_direction)) {
        return NULL;
    }

    ddm = (double *)PyArray_DATA(derivative_dynmat);
    fc = (double *)PyArray_DATA(py_force_constants);
    q = (double *)PyArray_DATA(q_vector);
    lat = (double *)PyArray_DATA(lattice);
    r = (double *)PyArray_DATA(r_vector);
    m = (double *)PyArray_DATA(py_masses);
    multi = (int *)PyArray_DATA(py_multiplicities);
    s2p_map = (int *)PyArray_DATA(py_s2p_map);
    p2s_map = (int *)PyArray_DATA(py_p2s_map);
    n_patom = PyArray_DIMS(py_p2s_map)[0];
    n_satom = PyArray_DIMS(py_s2p_map)[0];

    /* Optional NAC inputs: Py_None selects the no-NAC code path. */
    z = ((PyObject *) py_born == Py_None)
        ? NULL : (double *)PyArray_DATA(py_born);
    epsilon = ((PyObject *) dielectric == Py_None)
        ? NULL : (double *)PyArray_DATA(dielectric);
    q_dir = ((PyObject *) q_direction == Py_None)
        ? NULL : (double *)PyArray_DATA(q_direction);

    get_derivative_dynmat_at_q(ddm, n_patom, n_satom, fc, q, lat, r, multi,
                               m, s2p_map, p2s_map, nac_factor, z, epsilon,
                               q_dir);

    Py_RETURN_NONE;
}
num_bands; k++) { f = freqs[i * num_bands + k]; if (temperatures[j] > 0 && f > cutoff_frequency) { tp[i * num_temp * 3 + j * 3] += get_free_energy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 1] += get_entropy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 2] += get_heat_capacity(temperatures[j], f) * w[i]; } } } } for (i = 0; i < num_qpoints; i++) { for (j = 0; j < num_temp * 3; j++) { thermal_props[j] += tp[i * num_temp * 3 + j]; } } free(tp); tp = NULL; Py_RETURN_NONE; } static PyObject * py_distribute_fc2(PyObject * self, PyObject * args) { PyArrayObject *py_force_constants; PyArrayObject *py_permutations; PyArrayObject *py_map_atoms; PyArrayObject *py_map_syms; PyArrayObject *py_atom_list; PyArrayObject *py_rotations_cart; double (*r_carts)[3][3]; double (*fc2)[3][3]; int *permutations; int *map_atoms; int *map_syms; int *atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double (*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int *)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int *)PyArray_DATA(py_permutations); map_atoms = (int *)PyArray_DATA(py_map_atoms); map_syms = (int *)PyArray_DATA(py_map_syms); r_carts = (double (*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length"); return NULL; } 
distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, num_rot, num_pos); Py_RETURN_NONE; } static PyObject * py_thm_neighboring_grid_points(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_points; PyArrayObject *py_relative_grid_address; PyArrayObject *py_mesh; PyArrayObject *py_bz_grid_address; PyArrayObject *py_bz_map; int grid_point; int *relative_grid_points; int (*relative_grid_address)[3]; int num_relative_grid_address; int *mesh; int (*bz_grid_address)[3]; int *bz_map; if (!PyArg_ParseTuple(args, "OiOOOO", &py_relative_grid_points, &grid_point, &py_relative_grid_address, &py_mesh, &py_bz_grid_address, &py_bz_map)) { return NULL; } relative_grid_points = (int *)PyArray_DATA(py_relative_grid_points); relative_grid_address = (int (*)[3])PyArray_DATA(py_relative_grid_address); num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0]; mesh = (int *)PyArray_DATA(py_mesh); bz_grid_address = (int (*)[3])PyArray_DATA(py_bz_grid_address); bz_map = (int *)PyArray_DATA(py_bz_map); thm_get_neighboring_grid_points(relative_grid_points, grid_point, relative_grid_address, num_relative_grid_address, mesh, bz_grid_address, bz_map); Py_RETURN_NONE; } static PyObject * py_thm_relative_grid_address(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_address; PyArrayObject *py_reciprocal_lattice_py; int (*relative_grid_address)[4][3]; double (*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (int (*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double (*)[3])PyArray_DATA(py_reciprocal_lattice_py); thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject * py_thm_all_relative_grid_address(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_address; int (*relative_grid_address)[24][4][3]; if 
(!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) { return NULL; } relative_grid_address = (int (*)[24][4][3])PyArray_DATA(py_relative_grid_address); thm_get_all_relative_grid_address(relative_grid_address); Py_RETURN_NONE; } static PyObject * py_thm_integration_weight(PyObject * self, PyObject * args) { double omega; PyArrayObject *py_tetrahedra_omegas; char *function; double (*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, &function)) { return NULL; } tetrahedra_omegas = (double (*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = thm_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject * py_thm_integration_weight_at_omegas(PyObject * self, PyObject * args) { PyArrayObject *py_integration_weights; PyArrayObject *py_omegas; PyArrayObject *py_tetrahedra_omegas; char *function; double *omegas; double *iw; int num_omegas; double (*tetrahedra_omegas)[4]; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double *)PyArray_DATA(py_omegas); iw = (double *)PyArray_DATA(py_integration_weights); num_omegas = (int)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double (*)[4])PyArray_DATA(py_tetrahedra_omegas); thm_get_integration_weight_at_omegas(iw, num_omegas, omegas, tetrahedra_omegas, function[0]); Py_RETURN_NONE; } static PyObject * py_get_tetrahedra_frequenies(PyObject * self, PyObject * args) { PyArrayObject *py_freq_tetras; PyArrayObject *py_grid_points; PyArrayObject *py_mesh; PyArrayObject *py_grid_address; PyArrayObject *py_gp_ir_index; PyArrayObject *py_relative_grid_address; PyArrayObject *py_frequencies; double *freq_tetras; int *grid_points; int num_gp_in; int *mesh; int (*grid_address)[3]; int *gp_ir_index; int (*relative_grid_address)[3]; double *frequencies; int num_band; int is_shift[3] = {0, 0, 0}; int i, j, k, gp; int g_addr[3]; int address_double[3]; if 
(!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double *)PyArray_DATA(py_freq_tetras); grid_points = (int *)PyArray_DATA(py_grid_points); num_gp_in = (int)PyArray_DIMS(py_grid_points)[0]; mesh = (int *)PyArray_DATA(py_mesh); grid_address = (int (*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (int *)PyArray_DATA(py_gp_ir_index); relative_grid_address = (int (*)[3])PyArray_DATA(py_relative_grid_address); frequencies = (double *)PyArray_DATA(py_frequencies); num_band = (int)PyArray_DIMS(py_frequencies)[1]; for (i = 0; i < num_gp_in; i++) { for (j = 0; j < num_band * 96; j++) { for (k = 0; k < 3; k++) { g_addr[k] = grid_address[grid_points[i]][k] + relative_grid_address[j % 96][k]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); gp = kgd_get_grid_point_double_mesh(address_double, mesh); freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96]; } } Py_RETURN_NONE; } static PyObject * py_tetrahedron_method_dos(PyObject * self, PyObject * args) { PyArrayObject *py_dos; PyArrayObject *py_mesh; PyArrayObject *py_freq_points; PyArrayObject *py_frequencies; PyArrayObject *py_coef; PyArrayObject *py_grid_address; PyArrayObject *py_grid_mapping_table; PyArrayObject *py_relative_grid_address; double *dos; int *mesh; double *freq_points; int num_freq_points; double *frequencies; double *coef; int (*grid_address)[3]; int num_gp; int num_ir_gp; int num_coef; int num_band; int *grid_mapping_table; int (*relative_grid_address)[4][3]; int is_shift[3] = {0, 0, 0}; int i, j, k, l, m, q, r, count; int g_addr[3]; int ir_gps[24][4]; double tetrahedra[24][4]; int address_double[3]; int *gp2ir, *ir_grid_points, *weights; double iw; gp2ir = NULL; ir_grid_points = NULL; weights = NULL; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, 
&py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double *)PyArray_DATA(py_dos); mesh = (int *)PyArray_DATA(py_mesh); freq_points = (double *)PyArray_DATA(py_freq_points); num_freq_points = (int)PyArray_DIMS(py_freq_points)[0]; frequencies = (double *)PyArray_DATA(py_frequencies); num_ir_gp = (int)PyArray_DIMS(py_frequencies)[0]; num_band = (int)PyArray_DIMS(py_frequencies)[1]; coef = (double *)PyArray_DATA(py_coef); num_coef = (int)PyArray_DIMS(py_coef)[1]; grid_address = (int (*)[3])PyArray_DATA(py_grid_address); num_gp = (int)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (int *)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (int (*)[4][3])PyArray_DATA(py_relative_grid_address); gp2ir = (int *)malloc(sizeof(int) * num_gp); ir_grid_points = (int *)malloc(sizeof(int) * num_ir_gp); weights = (int *)malloc(sizeof(int) * num_ir_gp); count = 0; for (i = 0; i < num_gp; i++) { if (grid_mapping_table[i] == i) { gp2ir[i] = count; ir_grid_points[count] = i; weights[count] = 1; count++; } else { gp2ir[i] = gp2ir[grid_mapping_table[i]]; weights[gp2ir[i]]++; } } if (num_ir_gp != count) { printf("Something is wrong!\n"); } for (i = 0; i < num_ir_gp; i++) { /* set 24 tetrahedra */ for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { for (r = 0; r < 3; r++) { g_addr[r] = grid_address[ir_grid_points[i]][r] + relative_grid_address[l][q][r]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)]; } } for (k = 0; k < num_band; k++) { for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k]; } } for (j = 0; j < num_freq_points; j++) { iw = thm_get_integration_weight(freq_points[j], tetrahedra, 'I') * weights[i]; for (m = 0; m < num_coef; m++) { dos[i * num_band * num_freq_points * num_coef + k * num_coef * 
num_freq_points + j * num_coef + m] += iw * coef[i * num_coef * num_band + m * num_band + k]; } } } } free(gp2ir); gp2ir = NULL; free(ir_grid_points); ir_grid_points = NULL; free(weights); weights = NULL; Py_RETURN_NONE; } static double get_free_energy(const double temperature, const double f) { /* temperature is defined by T (K) */ /* 'f' must be given in eV. */ return KB * temperature * log(1 - exp(-f / (KB * temperature))); } static double get_entropy(const double temperature, const double f) { /* temperature is defined by T (K) */ /* 'f' must be given in eV. */ double val; val = f / (2 * KB * temperature); return 1 / (2 * temperature) * f * cosh(val) / sinh(val) - KB * log(2 * sinh(val)); } static double get_heat_capacity(const double temperature, const double f) { /* temperature is defined by T (K) */ /* 'f' must be given in eV. */ /* If val is close to 1. Then expansion is used. */ double val, val1, val2; val = f / (KB * temperature); val1 = exp(val); val2 = (val) / (val1 - 1); return KB * val1 * val2 * val2; } /* static double get_energy(double temperature, double f){ */ /* /\* temperature is defined by T (K) *\/ */ /* /\* 'f' must be given in eV. *\/ */ /* return f / (exp(f / (KB * temperature)) - 1); */ /* } */ static int compute_permutation(int *rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec) { int i, j, k, l; int search_start; double distance2, diff_cart; double diff[3]; for (i = 0; i < num_pos; i++) { rot_atom[i] = -1; } /* optimization: Iterate primarily by pos instead of rot_pos. */ /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */ /* Then track the first unassigned index. */ /* */ /* This works best if the permutation is close to the identity. 
*/ /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */ search_start = 0; for (i = 0; i < num_pos; i++) { while (rot_atom[search_start] >= 0) { search_start++; } for (j = search_start; j < num_pos; j++) { if (rot_atom[j] >= 0) { continue; } for (k = 0; k < 3; k++) { diff[k] = pos[i][k] - rot_pos[j][k]; diff[k] -= nint(diff[k]); } distance2 = 0; for (k = 0; k < 3; k++) { diff_cart = 0; for (l = 0; l < 3; l++) { diff_cart += lat[k][l] * diff[l]; } distance2 += diff_cart * diff_cart; } if (sqrt(distance2) < symprec) { rot_atom[j] = i; break; } } } for (i = 0; i < num_pos; i++) { if (rot_atom[i] < 0) { printf("Encounter some problem in compute_permutation.\n"); return 0; } } return 1; } /* Implementation detail of get_smallest_vectors. */ /* Finds the smallest vectors within each list and copies them to the output. */ static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int *multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec) { int i, j, k; int count; double minimum; double (*vectors)[3]; double *lengths; for (i = 0; i < num_lists; i++) { /* Look at a single list of 27 vectors. */ lengths = length_lists[i]; vectors = vector_lists[i]; /* Compute the minimum length. */ minimum = DBL_MAX; for (j = 0; j < 27; j++) { if (lengths[j] < minimum) { minimum = lengths[j]; } } /* Copy vectors whose length is within tolerance. 
*/ count = 0; for (j = 0; j < 27; j++) { if (lengths[j] - minimum <= symprec) { for (k = 0; k < 3; k++) { shortest_vectors[i][count][k] = vectors[j][k]; } count++; } } multiplicity[i] = count; } } static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int lattice_points[27][3], PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec) { int i, j, k, l, count; double length_tmp, minimum, vec_xyz; double length[27], vec[27][3]; for (i = 0; i < num_pos_to; i++) { for (j = 0; j < num_pos_from; j++) { for (k = 0; k < 27; k++) { length[k] = 0; for (l = 0; l < 3; l++) { vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l]; } for (l = 0; l < 3; l++) { length_tmp = (reduced_basis[l][0] * vec[k][0] + reduced_basis[l][1] * vec[k][1] + reduced_basis[l][2] * vec[k][2]); length[k] += length_tmp * length_tmp; } length[k] = sqrt(length[k]); } minimum = DBL_MAX; for (k = 0; k < 27; k++) { if (length[k] < minimum) { minimum = length[k]; } } count = 0; for (k = 0; k < 27; k++) { if (length[k] - minimum < symprec) { for (l = 0; l < 3; l++) { /* Transform to supercell coordinates */ vec_xyz = (trans_mat[l][0] * vec[k][0] + trans_mat[l][1] * vec[k][1] + trans_mat[l][2] * vec[k][2]); smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz; } count++; } } multiplicity[i * num_pos_from + j] = count; } } } static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */ const int *atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */ const int *permutations, /* shape[n_rot][n_pos] */ const int *map_atoms, /* shape [n_pos] */ const int *map_syms, /* shape [n_pos] */ const int num_rot, const int num_pos) { int i, j, k, l, m; int atom_todo, atom_done, atom_other; int sym_index; int *atom_list_reverse; double (*fc2_done)[3]; double (*fc2_todo)[3]; 
/* Fallback so this section also compiles when dynmat.h (which defines
   PHPYCONST) has not been included; a no-op in the normal build. */
#ifndef PHPYCONST
#define PHPYCONST
#endif

/* Distribute force constants from symmetry-independent atoms to all
   atoms in atom_list via P' = R^-1 P R.
   fc2: shape [n_pos][n_pos][3][3]; atoms mapping to themselves in
   map_atoms are assumed already computed ("done").
   NOTE(review): the malloc result is unchecked here as in the rest of
   this file; OOM would crash — confirm policy before changing. */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int *atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int *permutations, /* shape[n_rot][n_pos] */
                           const int *map_atoms, /* shape [n_pos] */
                           const int *map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
    int i, j, k, l, m;
    int atom_todo, atom_done, atom_other;
    int sym_index;
    int *atom_list_reverse;
    double (*fc2_done)[3];
    double (*fc2_todo)[3];
    double (*r_cart)[3];
    const int *permutation;

    atom_list_reverse = (int *)malloc(sizeof(int) * num_pos);
    /* atom_list_reverse[!atom_done] is undefined. */
    for (i = 0; i < len_atom_list; i++) {
        atom_done = map_atoms[atom_list[i]];
        if (atom_done == atom_list[i]) {
            atom_list_reverse[atom_done] = i;
        }
    }

    for (i = 0; i < len_atom_list; i++) {
        /* look up how this atom maps into the done list. */
        atom_todo = atom_list[i];
        atom_done = map_atoms[atom_todo];
        sym_index = map_syms[atom_todo];

        /* skip the atoms in the done list, */
        /* which are easily identified because they map to themselves. */
        if (atom_todo == atom_done) {
            continue;
        }

        /* look up information about the rotation */
        r_cart = r_carts[sym_index];
        permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

        /* distribute terms from atom_done to atom_todo */
        for (atom_other = 0; atom_other < num_pos; atom_other++) {
            fc2_done = fc2[atom_list_reverse[atom_done] * num_pos
                           + permutation[atom_other]];
            fc2_todo = fc2[i * num_pos + atom_other];
            for (j = 0; j < 3; j++) {
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        for (m = 0; m < 3; m++) {
                            /* P' = R^-1 P R */
                            fc2_todo[j][k] +=
                                r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
                        }
                    }
                }
            }
        }
    }

    free(atom_list_reverse);
    atom_list_reverse = NULL;
}

/* Symmetrize fc[natom][natom][3][3] under simultaneous exchange of the
   atom pair and the Cartesian pair: fc[i][j][k][l] and fc[j][i][l][k]
   are replaced by their average. */
static void set_index_permutation_symmetry_fc(double *fc, const int natom)
{
    int i, j, k, l, m, n;

    for (i = 0; i < natom; i++) {
        /* non diagonal part */
        for (j = i + 1; j < natom; j++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    m = i * natom * 9 + j * 9 + k * 3 + l;
                    n = j * natom * 9 + i * 9 + l * 3 + k;
                    fc[m] += fc[n];
                    fc[m] /= 2;
                    fc[n] = fc[m];
                }
            }
        }

        /* diagonal part: symmetrize the on-site 3x3 block */
        for (k = 0; k < 2; k++) {
            for (l = k + 1; l < 3; l++) {
                m = i * natom * 9 + i * 9 + k * 3 + l;
                n = i * natom * 9 + i * 9 + l * 3 + k;
                fc[m] += fc[n];
                fc[m] /= 2;
                fc[n] = fc[m];
            }
        }
    }
}

/* Enforce the acoustic sum rule on full force constants: each diagonal
   block fc[i][i] is set so that every row of blocks sums (symmetrically)
   to zero. */
static void set_translational_symmetry_fc(double *fc, const int natom)
{
    int i, j, k, l, m;
    double sums[3][3];

    for (i = 0; i < natom; i++) {
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                sums[k][l] = 0;
                m = i * natom * 9 + k * 3 + l;
                for (j = 0; j < natom; j++) {
                    if (i != j) {
                        sums[k][l] += fc[m];
                    }
                    m += 9;
                }
            }
        }
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                fc[i * natom * 9 + i * 9 + k * 3 + l] =
                    -(sums[k][l] + sums[l][k]) / 2;
            }
        }
    }
}

/* Index-permutation symmetrization for compact force constants
   fc[n_patom][n_satom][3][3].  The pair partner of (i_p, j) lives at
   (j_p, i_trans) found through the translation tables.
   is_transpose != 0 swaps the two elements; otherwise both are set to
   their average.
   NOTE(review): the malloc result is unchecked; OOM would crash. */
static void set_index_permutation_symmetry_compact_fc(double *fc,
                                                      const int p2s[],
                                                      const int s2pp[],
                                                      const int nsym_list[],
                                                      const int perms[],
                                                      const int n_satom,
                                                      const int n_patom,
                                                      const int is_transpose)
{
    int i, j, k, l, m, n, i_p, j_p, i_trans;
    double fc_elem;
    char *done;

    done = NULL;
    done = (char *)malloc(sizeof(char) * n_satom * n_patom);
    for (i = 0; i < n_satom * n_patom; i++) {
        done[i] = 0;
    }

    for (j = 0; j < n_satom; j++) {
        j_p = s2pp[j];
        for (i_p = 0; i_p < n_patom; i_p++) {
            i = p2s[i_p];
            if (i == j) { /* diagonal part */
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        if (l > k) {
                            m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
                            n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
                            if (is_transpose) {
                                fc_elem = fc[m];
                                fc[m] = fc[n];
                                fc[n] = fc_elem;
                            } else {
                                fc[m] = (fc[m] + fc[n]) / 2;
                                fc[n] = fc[m];
                            }
                        }
                    }
                }
            }

            if (!done[i_p * n_satom + j]) {
                /* (j, i) -- nsym_list[j] --> (j', i') */
                /* nsym_list[j] translates j to j' where j' is in */
                /* primitive cell. The same translation sends i to i' */
                /* where i' is not necessarily to be in primitive cell. */
                /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
                i_trans = perms[nsym_list[j] * n_satom + i];
                done[i_p * n_satom + j] = 1;
                done[j_p * n_satom + i_trans] = 1;
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
                        n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
                        if (is_transpose) {
                            fc_elem = fc[m];
                            fc[m] = fc[n];
                            fc[n] = fc_elem;
                        } else {
                            fc[m] = (fc[n] + fc[m]) / 2;
                            fc[n] = fc[m];
                        }
                    }
                }
            }
        }
    }

    free(done);
    done = NULL;
}

/* Acoustic sum rule for compact force constants: the self block
   fc[i_p][p2s[i_p]] is set from the symmetrized sum over the row. */
static void set_translational_symmetry_compact_fc(double *fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom)
{
    int j, k, l, m, i_p;
    double sums[3][3];

    for (i_p = 0; i_p < n_patom; i_p++) {
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                sums[k][l] = 0;
                m = i_p * n_satom * 9 + k * 3 + l;
                for (j = 0; j < n_satom; j++) {
                    if (p2s[i_p] != j) {
                        sums[k][l] += fc[m];
                    }
                    m += 9;
                }
            }
        }
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
                    -(sums[k][l] + sums[l][k]) / 2;
            }
        }
    }
}

/* Round to nearest integer, halves away from zero. */
static int nint(const double a)
{
    if (a < 0.0)
        return (int)(a - 0.5);
    else
        return (int)(a + 0.5);
}
/* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <stdio.h> #include <math.h> #include <float.h> #include <numpy/arrayobject.h> #include <dynmat.h> #include <derivative_dynmat.h> #include <kgrid.h> #include <tetrahedron_method.h> #define KB 8.6173382568083159E-05 /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject *py_transform_dynmat_to_fc(PyObject * self, PyObject * args); static PyObject *py_perm_trans_symmetrize_fc(PyObject * self, PyObject * args); static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject * self, PyObject * args); static PyObject *py_transpose_compact_fc(PyObject * self, PyObject * args); static PyObject *py_get_dynamical_matrix(PyObject * self, PyObject * args); static PyObject *py_get_nac_dynamical_matrix(PyObject * self, PyObject * args); static PyObject *py_get_dipole_dipole(PyObject * self, PyObject * args); static PyObject *py_get_dipole_dipole_q0(PyObject * self, PyObject * args); static PyObject *py_get_derivative_dynmat(PyObject * self, PyObject * args); static PyObject *py_get_thermal_properties(PyObject * self, PyObject * args); static PyObject *py_distribute_fc2(PyObject * self, PyObject * args); static PyObject *py_compute_permutation(PyObject * self, PyObject * args); static PyObject *py_gsv_copy_smallest_vectors(PyObject * self, PyObject * args); static PyObject *py_gsv_set_smallest_vectors(PyObject * self, PyObject * args); static PyObject * py_thm_neighboring_grid_points(PyObject * self, PyObject * args); static PyObject * py_thm_relative_grid_address(PyObject * self, PyObject * args); static PyObject * py_thm_all_relative_grid_address(PyObject * self, PyObject * args); static PyObject * py_thm_integration_weight(PyObject * self, PyObject * args); static PyObject * py_thm_integration_weight_at_omegas(PyObject * self, PyObject * args); static PyObject *py_get_tetrahedra_frequenies(PyObject * self, PyObject * args); static PyObject *py_tetrahedron_method_dos(PyObject * self, PyObject * args); static void 
distribute_fc2(double (*fc2)[3][3], const int *atom_list, const int len_atom_list, PHPYCONST double (*r_carts)[3][3], const int *permutations, const int *map_atoms, const int *map_syms, const int num_rot, const int num_pos); static int compute_permutation(int *rot_atom, PHPYCONST double lat[3][3], PHPYCONST double (*pos)[3], PHPYCONST double (*rot_pos)[3], const int num_pos, const double symprec); static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3], int *multiplicity, PHPYCONST double (*vector_lists)[27][3], PHPYCONST double (*length_lists)[27], const int num_lists, const double symprec); static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3], int *multiplicity, PHPYCONST double (*pos_to)[3], const int num_pos_to, PHPYCONST double (*pos_from)[3], const int num_pos_from, PHPYCONST int lattice_points[27][3], PHPYCONST double reduced_basis[3][3], PHPYCONST int trans_mat[3][3], const double symprec); static double get_free_energy(const double temperature, const double f); static double get_entropy(const double temperature, const double f); static double get_heat_capacity(const double temperature, const double f); static void set_index_permutation_symmetry_fc(double *fc, const int natom); static void set_translational_symmetry_fc(double *fc, const int natom); static void set_index_permutation_symmetry_compact_fc(double *fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose); static void set_translational_symmetry_compact_fc(double *fc, const int p2s[], const int n_satom, const int n_patom); /* static double get_energy(double temperature, double f); */ static int nint(const double a); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static PyObject * error_out(PyObject * m) { struct 
module_state *st = GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction) error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"dipole_dipole", py_get_dipole_dipole, METH_VARARGS, "Dipole-dipole interaction"}, {"dipole_dipole_q0", py_get_dipole_dipole_q0, METH_VARARGS, "q=0 terms of Dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_copy_smallest_vectors", py_gsv_copy_smallest_vectors, METH_VARARGS, "Implementation detail of get_smallest_vectors."}, {"gsv_set_smallest_vectors", py_gsv_set_smallest_vectors, METH_VARARGS, "Set candidate vectors."}, {"neighboring_grid_points", py_thm_neighboring_grid_points, METH_VARARGS, "Neighboring grid points by relative grid addresses"}, {"tetrahedra_relative_grid_address", py_thm_relative_grid_address, METH_VARARGS, "Relative 
grid addresses of vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"get_tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject * m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject * m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL }; #define INITERROR return NULL PyObject * PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state *st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject * py_transform_dynmat_to_fc(PyObject * self, PyObject * args) { PyArrayObject *py_force_constants; PyArrayObject *py_dynamical_matrices; PyArrayObject *py_commensurate_points; PyArrayObject *py_shortest_vectors; PyArrayObject *py_multiplicities; PyArrayObject *py_masses; PyArrayObject *py_s2pp_map; PyArrayObject *py_fc_index_map; double *fc; double 
*dm; /* continues the declaration list of py_transform_dynmat_to_fc */
    double (*comm_points)[3];
    double (*shortest_vectors)[27][3];
    double *masses;
    int *multiplicities;
    int *s2pp_map;
    int *fc_index_map;
    int num_patom;
    int num_satom;

    if (!PyArg_ParseTuple(args, "OOOOOOOO",
                          &py_force_constants,
                          &py_dynamical_matrices,
                          &py_commensurate_points,
                          &py_shortest_vectors,
                          &py_multiplicities,
                          &py_masses,
                          &py_s2pp_map,
                          &py_fc_index_map)) {
        return NULL;
    }

    fc = (double *)PyArray_DATA(py_force_constants);
    dm = (double *)PyArray_DATA(py_dynamical_matrices);
    comm_points = (double (*)[3])PyArray_DATA(py_commensurate_points);
    shortest_vectors = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
    masses = (double *)PyArray_DATA(py_masses);
    multiplicities = (int *)PyArray_DATA(py_multiplicities);
    s2pp_map = (int *)PyArray_DATA(py_s2pp_map);
    fc_index_map = (int *)PyArray_DATA(py_fc_index_map);
    /* multiplicities has shape [num_satom][num_patom]; both counts are
       taken from its dimensions. */
    num_patom = PyArray_DIMS(py_multiplicities)[1];
    num_satom = PyArray_DIMS(py_multiplicities)[0];

    /* Delegate the actual transformation to dynmat.c. */
    dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors,
                               multiplicities, masses, s2pp_map,
                               fc_index_map, num_patom, num_satom);

    Py_RETURN_NONE;
}

/* Python wrapper around compute_permutation(): fill `permutation` with the
 * index mapping from rotated positions back onto the original positions.
 * Returns the int status from compute_permutation (1 = success, 0 = some
 * atom could not be matched within symprec). */
static PyObject *
py_compute_permutation(PyObject * self, PyObject * args)
{
    PyArrayObject *permutation;
    PyArrayObject *lattice;
    PyArrayObject *positions;
    PyArrayObject *permuted_positions;
    double symprec;

    int *rot_atoms;
    double (*lat)[3];
    double (*pos)[3];
    double (*rot_pos)[3];
    int num_pos;
    int is_found;

    if (!PyArg_ParseTuple(args, "OOOOd",
                          &permutation,
                          &lattice,
                          &positions,
                          &permuted_positions,
                          &symprec)) {
        return NULL;
    }

    rot_atoms = (int *)PyArray_DATA(permutation);
    lat = (double (*)[3])PyArray_DATA(lattice);
    pos = (double (*)[3])PyArray_DATA(positions);
    rot_pos = (double (*)[3])PyArray_DATA(permuted_positions);
    num_pos = PyArray_DIMS(positions)[0];

    is_found = compute_permutation(rot_atoms, lat, pos, rot_pos,
                                   num_pos, symprec);

    return Py_BuildValue("i", is_found);
}

/* Python wrapper around gsv_copy_smallest_vectors(): the (super, prim)
 * leading axes of py_vectors are flattened into one list index, so
 * num_lists = size_super * size_prim. */
static PyObject *
py_gsv_copy_smallest_vectors(PyObject * self, PyObject * args)
{
    PyArrayObject *py_shortest_vectors;
    PyArrayObject *py_multiplicity;
    PyArrayObject *py_vectors;
    PyArrayObject *py_lengths;
    double symprec;

    double (*shortest_vectors)[27][3];
    double (*vectors)[27][3];
    double (*lengths)[27];
    int *multiplicity;
    int size_super, size_prim;

    if (!PyArg_ParseTuple(args, "OOOOd",
                          &py_shortest_vectors,
                          &py_multiplicity,
                          &py_vectors,
                          &py_lengths,
                          &symprec)) {
        return NULL;
    }

    shortest_vectors = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
    multiplicity = (int *)PyArray_DATA(py_multiplicity);
    vectors = (double (*)[27][3])PyArray_DATA(py_vectors);
    lengths = (double (*)[27])PyArray_DATA(py_lengths);
    size_super = PyArray_DIMS(py_vectors)[0];
    size_prim = PyArray_DIMS(py_vectors)[1];

    gsv_copy_smallest_vectors(shortest_vectors, multiplicity,
                              vectors, lengths,
                              size_super * size_prim, symprec);
    Py_RETURN_NONE;
}

/* Python wrapper around gsv_set_smallest_vectors(). */
static PyObject *
py_gsv_set_smallest_vectors(PyObject * self, PyObject * args)
{
    PyArrayObject *py_smallest_vectors;
    PyArrayObject *py_multiplicity;
    PyArrayObject *py_pos_to;
    PyArrayObject *py_pos_from;
    PyArrayObject *py_lattice_points;
    PyArrayObject *py_reduced_basis;
    PyArrayObject *py_trans_mat;
    double symprec;

    double (*smallest_vectors)[27][3];
    int *multiplicity;
    double (*pos_to)[3];
    double (*pos_from)[3];
    int (*lattice_points)[3];
    double (*reduced_basis)[3];
    int (*trans_mat)[3];
    int num_pos_to, num_pos_from;

    if (!PyArg_ParseTuple(args, "OOOOOOOd",
                          &py_smallest_vectors,
                          &py_multiplicity,
                          &py_pos_to,
                          &py_pos_from,
                          &py_lattice_points,
                          &py_reduced_basis,
                          &py_trans_mat,
                          &symprec)) {
        return NULL;
    }

    smallest_vectors = (double (*)[27][3])PyArray_DATA(py_smallest_vectors);
    multiplicity = (int *)PyArray_DATA(py_multiplicity);
    pos_to = (double (*)[3])PyArray_DATA(py_pos_to);
    pos_from = (double (*)[3])PyArray_DATA(py_pos_from);
    num_pos_to = PyArray_DIMS(py_pos_to)[0];
    num_pos_from = PyArray_DIMS(py_pos_from)[0];
    lattice_points = (int (*)[3])PyArray_DATA(py_lattice_points);
    reduced_basis = (double (*)[3])PyArray_DATA(py_reduced_basis);
    trans_mat = (int (*)[3])PyArray_DATA(py_trans_mat);
gsv_set_smallest_vectors(smallest_vectors, multiplicity,
                             pos_to, num_pos_to,
                             pos_from, num_pos_from,
                             lattice_points, reduced_basis, trans_mat,
                             symprec);
    Py_RETURN_NONE;
}

/* Enforce translational and index-permutation symmetry on full force
 * constants fc[n_satom][n_satom][3][3].  Each of the `level` iterations
 * removes the drift (mean over one atom index) along columns and rows and
 * then symmetrizes under index permutation; a final pass applies
 * translational symmetry exactly. */
static PyObject *
py_perm_trans_symmetrize_fc(PyObject * self, PyObject * args)
{
    PyArrayObject *force_constants;
    double *fc;
    int level;

    int n_satom, i, j, k, l, iter;
    double sum;

    if (!PyArg_ParseTuple(args, "Oi", &force_constants, &level)) {
        return NULL;
    }

    fc = (double *)PyArray_DATA(force_constants);
    n_satom = PyArray_DIMS(force_constants)[0];

    for (iter = 0; iter < level; iter++) {
        /* Subtract drift along column: for each (second atom j, k, l),
           remove the mean over the first atom index i. */
        for (j = 0; j < n_satom; j++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    sum = 0;
                    for (i = 0; i < n_satom; i++) {
                        sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                    }
                    sum /= n_satom;
                    for (i = 0; i < n_satom; i++) {
                        fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                    }
                }
            }
        }

        /* Subtract drift along row: same operation with the roles of the
           two atom indices exchanged. */
        for (i = 0; i < n_satom; i++) {
            for (k = 0; k < 3; k++) {
                for (l = 0; l < 3; l++) {
                    sum = 0;
                    for (j = 0; j < n_satom; j++) {
                        sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                    }
                    sum /= n_satom;
                    for (j = 0; j < n_satom; j++) {
                        fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                    }
                }
            }
        }
        set_index_permutation_symmetry_fc(fc, n_satom);
    }
    set_translational_symmetry_fc(fc, n_satom);

    Py_RETURN_NONE;
}

/* Same symmetrization for the compact force-constant format
 * fc[n_patom][n_satom][3][3], where permutation symmetry needs the
 * symmetry-operation mappings (perms, s2pp, p2s, nsym_list). */
static PyObject *
py_perm_trans_symmetrize_compact_fc(PyObject * self, PyObject * args)
{
    PyArrayObject *py_fc;
    PyArrayObject *py_permutations;
    PyArrayObject *py_s2pp_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_nsym_list;
    int level;

    double *fc;
    int *perms;
    int *s2pp;
    int *p2s;
    int *nsym_list;
    int n_patom, n_satom, i, j, k, l, n, iter;
    double sum;

    if (!PyArg_ParseTuple(args, "OOOOOi",
                          &py_fc,
                          &py_permutations,
                          &py_s2pp_map,
                          &py_p2s_map,
                          &py_nsym_list,
                          &level)) {
        return NULL;
    }

    fc = (double *)PyArray_DATA(py_fc);
    perms = (int *)PyArray_DATA(py_permutations);
    s2pp = (int *)PyArray_DATA(py_s2pp_map);
    p2s = (int *)PyArray_DATA(py_p2s_map);
    nsym_list = (int *)PyArray_DATA(py_nsym_list);
    n_patom =
PyArray_DIMS(py_fc)[0]; /* completes "n_patom =" from the previous chunk */
    n_satom = PyArray_DIMS(py_fc)[1];

    for (iter = 0; iter < level; iter++) {
        /* Two passes: transpose, drift-subtract, transpose back,
           drift-subtract again. */
        for (n = 0; n < 2; n++) {
            /* transpose only */
            set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp,
                                                      nsym_list, perms,
                                                      n_satom, n_patom, 1);
            for (i = 0; i < n_patom; i++) {
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        sum = 0;
                        for (j = 0; j < n_satom; j++) {
                            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
                        }
                        sum /= n_satom;
                        for (j = 0; j < n_satom; j++) {
                            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
                        }
                    }
                }
            }
        }
        /* Full permutation symmetrization (not just transpose). */
        set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list,
                                                  perms, n_satom, n_patom, 0);
    }
    set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);

    Py_RETURN_NONE;
}

/* Transpose compact force constants in place (permutation machinery with
 * is_transpose = 1). */
static PyObject *
py_transpose_compact_fc(PyObject * self, PyObject * args)
{
    PyArrayObject *py_fc;
    PyArrayObject *py_permutations;
    PyArrayObject *py_s2pp_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_nsym_list;

    double *fc;
    int *s2pp;
    int *p2s;
    int *nsym_list;
    int *perms;
    int n_patom, n_satom;

    if (!PyArg_ParseTuple(args, "OOOOO",
                          &py_fc,
                          &py_permutations,
                          &py_s2pp_map,
                          &py_p2s_map,
                          &py_nsym_list)) {
        return NULL;
    }

    fc = (double *)PyArray_DATA(py_fc);
    perms = (int *)PyArray_DATA(py_permutations);
    s2pp = (int *)PyArray_DATA(py_s2pp_map);
    p2s = (int *)PyArray_DATA(py_p2s_map);
    nsym_list = (int *)PyArray_DATA(py_nsym_list);
    n_patom = PyArray_DIMS(py_fc)[0];
    n_satom = PyArray_DIMS(py_fc)[1];

    set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list,
                                              perms, n_satom, n_patom, 1);
    Py_RETURN_NONE;
}

/* Build the dynamical matrix at q (no non-analytical correction:
 * charge_sum argument is NULL). */
static PyObject *
py_get_dynamical_matrix(PyObject * self, PyObject * args)
{
    PyArrayObject *py_dynamical_matrix;
    PyArrayObject *py_force_constants;
    PyArrayObject *py_shortest_vectors;
    PyArrayObject *py_q;
    PyArrayObject *py_multiplicities;
    PyArrayObject *py_masses;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_p2s_map;

    double *dm;
    double *fc;
    double *q;
    double (*svecs)[27][3];
    double *m;
    int *multi;
    int *s2p_map;
    int *p2s_map;
    int num_patom;
    int num_satom;

    if
        (!PyArg_ParseTuple(args, "OOOOOOOO",
                           &py_dynamical_matrix,
                           &py_force_constants,
                           &py_q,
                           &py_shortest_vectors,
                           &py_multiplicities,
                           &py_masses,
                           &py_s2p_map,
                           &py_p2s_map)) {
        return NULL;
    }

    dm = (double *)PyArray_DATA(py_dynamical_matrix);
    fc = (double *)PyArray_DATA(py_force_constants);
    q = (double *)PyArray_DATA(py_q);
    svecs = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
    m = (double *)PyArray_DATA(py_masses);
    multi = (int *)PyArray_DATA(py_multiplicities);
    s2p_map = (int *)PyArray_DATA(py_s2p_map);
    p2s_map = (int *)PyArray_DATA(py_p2s_map);
    num_patom = PyArray_DIMS(py_p2s_map)[0];
    num_satom = PyArray_DIMS(py_s2p_map)[0];

    dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q,
                                  svecs, multi, m, s2p_map, p2s_map,
                                  NULL, 1);
    Py_RETURN_NONE;
}

/* Dynamical matrix at q with the non-analytical (Born effective charge)
 * correction: a charge_sum table is built per primitive-atom pair and
 * passed into dym_get_dynamical_matrix_at_q. */
static PyObject *
py_get_nac_dynamical_matrix(PyObject * self, PyObject * args)
{
    PyArrayObject *py_dynamical_matrix;
    PyArrayObject *py_force_constants;
    PyArrayObject *py_shortest_vectors;
    PyArrayObject *py_q_cart;
    PyArrayObject *py_q;
    PyArrayObject *py_multiplicities;
    PyArrayObject *py_masses;
    PyArrayObject *py_s2p_map;
    PyArrayObject *py_p2s_map;
    PyArrayObject *py_born;
    double factor;

    double *dm;
    double *fc;
    double *q_cart;
    double *q;
    double (*svecs)[27][3];
    double *m;
    double (*born)[3][3];
    int *multi;
    int *s2p_map;
    int *p2s_map;
    int num_patom;
    int num_satom;

    int n;
    double (*charge_sum)[3][3];

    if (!PyArg_ParseTuple(args, "OOOOOOOOOOd",
                          &py_dynamical_matrix,
                          &py_force_constants,
                          &py_q,
                          &py_shortest_vectors,
                          &py_multiplicities,
                          &py_masses,
                          &py_s2p_map,
                          &py_p2s_map,
                          &py_q_cart,
                          &py_born,
                          &factor))
        return NULL;

    dm = (double *)PyArray_DATA(py_dynamical_matrix);
    fc = (double *)PyArray_DATA(py_force_constants);
    q_cart = (double *)PyArray_DATA(py_q_cart);
    q = (double *)PyArray_DATA(py_q);
    svecs = (double (*)[27][3])PyArray_DATA(py_shortest_vectors);
    m = (double *)PyArray_DATA(py_masses);
    born = (double (*)[3][3])PyArray_DATA(py_born);
    multi = (int *)PyArray_DATA(py_multiplicities);
    s2p_map = (int *)PyArray_DATA(py_s2p_map);
    p2s_map = (int *)PyArray_DATA(py_p2s_map);
    num_patom = PyArray_DIMS(py_p2s_map)[0];
    num_satom = PyArray_DIMS(py_s2p_map)[0];

    /* NOTE(review): malloc result is not checked for NULL before use;
       an allocation failure here would crash the interpreter. */
    charge_sum = (double (*)[3][3])
        malloc(sizeof(double[3][3]) * num_patom * num_patom);
    /* n = number of primitive cells in the supercell; scales the NAC
       factor. */
    n = num_satom / num_patom;
    dym_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born);
    dym_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q,
                                  svecs, multi, m, s2p_map, p2s_map,
                                  charge_sum, 1);

    free(charge_sum);

    Py_RETURN_NONE;
}

/* Ewald-type dipole-dipole interaction term (real+imag interleaved
 * output arrays; see inline shape comments on the call below). */
static PyObject *
py_get_dipole_dipole(PyObject * self, PyObject * args)
{
    PyArrayObject *py_dd;
    PyArrayObject *py_dd_q0;
    PyArrayObject *py_G_list;
    PyArrayObject *py_q_cart;
    PyArrayObject *py_q_direction;
    PyArrayObject *py_born;
    PyArrayObject *py_dielectric;
    PyArrayObject *py_positions;
    double factor;
    double lambda;
    double tolerance;

    double *dd;
    double *dd_q0;
    double (*G_list)[3];
    double *q_vector;
    double *q_direction;
    double (*born)[3][3];
    double (*dielectric)[3];
    double (*pos)[3];
    int num_patom, num_G;

    if (!PyArg_ParseTuple(args, "OOOOOOOOddd",
                          &py_dd,
                          &py_dd_q0,
                          &py_G_list,
                          &py_q_cart,
                          &py_q_direction,
                          &py_born,
                          &py_dielectric,
                          &py_positions,
                          &factor,
                          &lambda,
                          &tolerance))
        return NULL;

    dd = (double *)PyArray_DATA(py_dd);
    dd_q0 = (double *)PyArray_DATA(py_dd_q0);
    G_list = (double (*)[3])PyArray_DATA(py_G_list);
    /* q_direction is optional: Python passes None when unused. */
    if ((PyObject *) py_q_direction == Py_None) {
        q_direction = NULL;
    } else {
        q_direction = (double *)PyArray_DATA(py_q_direction);
    }
    q_vector = (double *)PyArray_DATA(py_q_cart);
    born = (double (*)[3][3])PyArray_DATA(py_born);
    dielectric = (double (*)[3])PyArray_DATA(py_dielectric);
    pos = (double (*)[3])PyArray_DATA(py_positions);
    num_G = PyArray_DIMS(py_G_list)[0];
    num_patom = PyArray_DIMS(py_positions)[0];

    dym_get_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */
                          dd_q0,/* [natom, 3, 3, (real, imag)] */
                          G_list, /* [num_kvec, 3] */
                          num_G,
                          num_patom,
                          q_vector,
                          q_direction,
                          born,
                          dielectric,
                          pos, /* [natom, 3] */
                          factor, /* 4pi/V*unit-conv */
                          lambda, /* 4 * Lambda^2 */
                          tolerance);

    Py_RETURN_NONE;
}
static PyObject * py_get_dipole_dipole_q0(PyObject * self, PyObject * args) { PyArrayObject *py_dd_q0; PyArrayObject *py_G_list; PyArrayObject *py_born; PyArrayObject *py_dielectric; PyArrayObject *py_positions; double lambda; double tolerance; double *dd_q0; double (*G_list)[3]; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; int num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double *)PyArray_DATA(py_dd_q0); G_list = (double (*)[3])PyArray_DATA(py_G_list); born = (double (*)[3][3])PyArray_DATA(py_born); dielectric = (double (*)[3])PyArray_DATA(py_dielectric); pos = (double (*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; dym_get_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_derivative_dynmat(PyObject * self, PyObject * args) { PyArrayObject *derivative_dynmat; PyArrayObject *py_force_constants; PyArrayObject *r_vector; PyArrayObject *lattice; PyArrayObject *q_vector; PyArrayObject *py_multiplicities; PyArrayObject *py_masses; PyArrayObject *py_s2p_map; PyArrayObject *py_p2s_map; PyArrayObject *py_born; PyArrayObject *dielectric; PyArrayObject *q_direction; double nac_factor; double *ddm; double *fc; double *q; double *lat; double *r; double *m; int *multi; int *s2p_map; int *p2s_map; int num_patom; int num_satom; double *z; double *epsilon; double *q_dir; if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &derivative_dynmat, &py_force_constants, &q_vector, &lattice, /* column vectors */ &r_vector, &py_multiplicities, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &dielectric, &q_direction)) { return NULL; } ddm = (double *)PyArray_DATA(derivative_dynmat); fc = (double 
*)PyArray_DATA(py_force_constants); q = (double *)PyArray_DATA(q_vector); lat = (double *)PyArray_DATA(lattice); r = (double *)PyArray_DATA(r_vector); m = (double *)PyArray_DATA(py_masses); multi = (int *)PyArray_DATA(py_multiplicities); s2p_map = (int *)PyArray_DATA(py_s2p_map); p2s_map = (int *)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject *) py_born == Py_None) { z = NULL; } else { z = (double *)PyArray_DATA(py_born); } if ((PyObject *) dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double *)PyArray_DATA(dielectric); } if ((PyObject *) q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double *)PyArray_DATA(q_direction); } get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q, lat, r, multi, m, s2p_map, p2s_map, nac_factor, z, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject * py_get_thermal_properties(PyObject * self, PyObject * args) { PyArrayObject *py_thermal_props; PyArrayObject *py_temperatures; PyArrayObject *py_frequencies; PyArrayObject *py_weights; double cutoff_frequency; double *temperatures; double *freqs; double *thermal_props; int *w; int num_qpoints; int num_bands; int num_temp; int i, j, k; long sum_weights; double f; double *tp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double *)PyArray_DATA(py_thermal_props); temperatures = (double *)PyArray_DATA(py_temperatures); num_temp = PyArray_DIMS(py_temperatures)[0]; freqs = (double *)PyArray_DATA(py_frequencies); num_qpoints = PyArray_DIMS(py_frequencies)[0]; w = (int *)PyArray_DATA(py_weights); num_bands = PyArray_DIMS(py_frequencies)[1]; tp = (double *)malloc(sizeof(double) * num_qpoints * num_temp * 3); for (i = 0; i < num_qpoints * num_temp * 3; i++) { tp[i] = 0; } #pragma omp parallel for private(j, k, f) for (i = 0; i < num_qpoints; i++) { for (j = 0; j 
< num_temp; j++) { for (k = 0; k < num_bands; k++) { f = freqs[i * num_bands + k]; if (temperatures[j] > 0 && f > cutoff_frequency) { tp[i * num_temp * 3 + j * 3] += get_free_energy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 1] += get_entropy(temperatures[j], f) * w[i]; tp[i * num_temp * 3 + j * 3 + 2] += get_heat_capacity(temperatures[j], f) * w[i]; } } } } for (i = 0; i < num_qpoints; i++) { for (j = 0; j < num_temp * 3; j++) { thermal_props[j] += tp[i * num_temp * 3 + j]; } } free(tp); tp = NULL; Py_RETURN_NONE; } static PyObject * py_distribute_fc2(PyObject * self, PyObject * args) { PyArrayObject *py_force_constants; PyArrayObject *py_permutations; PyArrayObject *py_map_atoms; PyArrayObject *py_map_syms; PyArrayObject *py_atom_list; PyArrayObject *py_rotations_cart; double (*r_carts)[3][3]; double (*fc2)[3][3]; int *permutations; int *map_atoms; int *map_syms; int *atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double (*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int *)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int *)PyArray_DATA(py_permutations); map_atoms = (int *)PyArray_DATA(py_map_atoms); map_syms = (int *)PyArray_DATA(py_map_syms); r_carts = (double (*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations are 
different length"); return NULL; } distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, num_rot, num_pos); Py_RETURN_NONE; } static PyObject * py_thm_neighboring_grid_points(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_points; PyArrayObject *py_relative_grid_address; PyArrayObject *py_mesh; PyArrayObject *py_bz_grid_address; PyArrayObject *py_bz_map; int grid_point; int *relative_grid_points; int (*relative_grid_address)[3]; int num_relative_grid_address; int *mesh; int (*bz_grid_address)[3]; int *bz_map; if (!PyArg_ParseTuple(args, "OiOOOO", &py_relative_grid_points, &grid_point, &py_relative_grid_address, &py_mesh, &py_bz_grid_address, &py_bz_map)) { return NULL; } relative_grid_points = (int *)PyArray_DATA(py_relative_grid_points); relative_grid_address = (int (*)[3])PyArray_DATA(py_relative_grid_address); num_relative_grid_address = PyArray_DIMS(py_relative_grid_address)[0]; mesh = (int *)PyArray_DATA(py_mesh); bz_grid_address = (int (*)[3])PyArray_DATA(py_bz_grid_address); bz_map = (int *)PyArray_DATA(py_bz_map); thm_get_neighboring_grid_points(relative_grid_points, grid_point, relative_grid_address, num_relative_grid_address, mesh, bz_grid_address, bz_map); Py_RETURN_NONE; } static PyObject * py_thm_relative_grid_address(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_address; PyArrayObject *py_reciprocal_lattice_py; int (*relative_grid_address)[4][3]; double (*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (int (*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double (*)[3])PyArray_DATA(py_reciprocal_lattice_py); thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject * py_thm_all_relative_grid_address(PyObject * self, PyObject * args) { PyArrayObject *py_relative_grid_address; int 
(*relative_grid_address)[24][4][3]; if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) { return NULL; } relative_grid_address = (int (*)[24][4][3])PyArray_DATA(py_relative_grid_address); thm_get_all_relative_grid_address(relative_grid_address); Py_RETURN_NONE; } static PyObject * py_thm_integration_weight(PyObject * self, PyObject * args) { double omega; PyArrayObject *py_tetrahedra_omegas; char *function; double (*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, &function)) { return NULL; } tetrahedra_omegas = (double (*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = thm_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject * py_thm_integration_weight_at_omegas(PyObject * self, PyObject * args) { PyArrayObject *py_integration_weights; PyArrayObject *py_omegas; PyArrayObject *py_tetrahedra_omegas; char *function; double *omegas; double *iw; int num_omegas; double (*tetrahedra_omegas)[4]; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double *)PyArray_DATA(py_omegas); iw = (double *)PyArray_DATA(py_integration_weights); num_omegas = (int)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double (*)[4])PyArray_DATA(py_tetrahedra_omegas); thm_get_integration_weight_at_omegas(iw, num_omegas, omegas, tetrahedra_omegas, function[0]); Py_RETURN_NONE; } static PyObject * py_get_tetrahedra_frequenies(PyObject * self, PyObject * args) { PyArrayObject *py_freq_tetras; PyArrayObject *py_grid_points; PyArrayObject *py_mesh; PyArrayObject *py_grid_address; PyArrayObject *py_gp_ir_index; PyArrayObject *py_relative_grid_address; PyArrayObject *py_frequencies; double *freq_tetras; int *grid_points; int num_gp_in; int *mesh; int (*grid_address)[3]; int *gp_ir_index; int (*relative_grid_address)[3]; double *frequencies; int num_band; int is_shift[3] = {0, 0, 0}; int i, j, k, gp; int 
g_addr[3]; int address_double[3]; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double *)PyArray_DATA(py_freq_tetras); grid_points = (int *)PyArray_DATA(py_grid_points); num_gp_in = (int)PyArray_DIMS(py_grid_points)[0]; mesh = (int *)PyArray_DATA(py_mesh); grid_address = (int (*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (int *)PyArray_DATA(py_gp_ir_index); relative_grid_address = (int (*)[3])PyArray_DATA(py_relative_grid_address); frequencies = (double *)PyArray_DATA(py_frequencies); num_band = (int)PyArray_DIMS(py_frequencies)[1]; for (i = 0; i < num_gp_in; i++) { #pragma omp parallel for private(k, g_addr, gp, address_double) for (j = 0; j < num_band * 96; j++) { for (k = 0; k < 3; k++) { g_addr[k] = grid_address[grid_points[i]][k] + relative_grid_address[j % 96][k]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); gp = kgd_get_grid_point_double_mesh(address_double, mesh); freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96]; } } Py_RETURN_NONE; } static PyObject * py_tetrahedron_method_dos(PyObject * self, PyObject * args) { PyArrayObject *py_dos; PyArrayObject *py_mesh; PyArrayObject *py_freq_points; PyArrayObject *py_frequencies; PyArrayObject *py_coef; PyArrayObject *py_grid_address; PyArrayObject *py_grid_mapping_table; PyArrayObject *py_relative_grid_address; double *dos; int *mesh; double *freq_points; int num_freq_points; double *frequencies; double *coef; int (*grid_address)[3]; int num_gp; int num_ir_gp; int num_coef; int num_band; int *grid_mapping_table; int (*relative_grid_address)[4][3]; int is_shift[3] = {0, 0, 0}; int i, j, k, l, m, q, r, count; int g_addr[3]; int ir_gps[24][4]; double tetrahedra[24][4]; int address_double[3]; int *gp2ir, *ir_grid_points, *weights; double iw; gp2ir = NULL; ir_grid_points = NULL; weights = NULL; if 
(!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double *)PyArray_DATA(py_dos); mesh = (int *)PyArray_DATA(py_mesh); freq_points = (double *)PyArray_DATA(py_freq_points); num_freq_points = (int)PyArray_DIMS(py_freq_points)[0]; frequencies = (double *)PyArray_DATA(py_frequencies); num_ir_gp = (int)PyArray_DIMS(py_frequencies)[0]; num_band = (int)PyArray_DIMS(py_frequencies)[1]; coef = (double *)PyArray_DATA(py_coef); num_coef = (int)PyArray_DIMS(py_coef)[1]; grid_address = (int (*)[3])PyArray_DATA(py_grid_address); num_gp = (int)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (int *)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (int (*)[4][3])PyArray_DATA(py_relative_grid_address); gp2ir = (int *)malloc(sizeof(int) * num_gp); ir_grid_points = (int *)malloc(sizeof(int) * num_ir_gp); weights = (int *)malloc(sizeof(int) * num_ir_gp); count = 0; for (i = 0; i < num_gp; i++) { if (grid_mapping_table[i] == i) { gp2ir[i] = count; ir_grid_points[count] = i; weights[count] = 1; count++; } else { gp2ir[i] = gp2ir[grid_mapping_table[i]]; weights[gp2ir[i]]++; } } if (num_ir_gp != count) { printf("Something is wrong!\n"); } #pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double) for (i = 0; i < num_ir_gp; i++) { /* set 24 tetrahedra */ for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { for (r = 0; r < 3; r++) { g_addr[r] = grid_address[ir_grid_points[i]][r] + relative_grid_address[l][q][r]; } kgd_get_grid_address_double_mesh(address_double, g_addr, mesh, is_shift); ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)]; } } for (k = 0; k < num_band; k++) { for (l = 0; l < 24; l++) { for (q = 0; q < 4; q++) { tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k]; } } for (j = 0; j < 
num_freq_points; j++) { /* continues the frequency-point loop of
                                       py_tetrahedron_method_dos */
                    iw = thm_get_integration_weight(freq_points[j],
                                                    tetrahedra,
                                                    'I') * weights[i];
                    for (m = 0; m < num_coef; m++) {
                        dos[i * num_band * num_freq_points * num_coef +
                            k * num_coef * num_freq_points +
                            j * num_coef + m] +=
                            iw * coef[i * num_coef * num_band +
                                      m * num_band + k];
                    }
                }
            }
        }

    free(gp2ir);
    gp2ir = NULL;
    free(ir_grid_points);
    ir_grid_points = NULL;
    free(weights);
    weights = NULL;

    Py_RETURN_NONE;
}

/* Phonon mode Helmholtz free energy contribution. */
static double get_free_energy(const double temperature, const double f)
{
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    return KB * temperature * log(1 - exp(-f / (KB * temperature)));
}

/* Phonon mode entropy contribution. */
static double get_entropy(const double temperature, const double f)
{
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    double val;

    val = f / (2 * KB * temperature);
    return 1 / (2 * temperature) * f * cosh(val) / sinh(val) -
        KB * log(2 * sinh(val));
}

/* Phonon mode heat capacity (Einstein term). */
static double get_heat_capacity(const double temperature, const double f)
{
    /* temperature is defined by T (K) */
    /* 'f' must be given in eV. */
    /* NOTE(review): an earlier comment claimed a series expansion is used
       when val is close to 1, but none is implemented here — the closed
       form below is always evaluated. */
    double val, val1, val2;

    val = f / (KB * temperature);
    val1 = exp(val);
    val2 = (val) / (val1 - 1);
    return KB * val1 * val2 * val2;
}

/* static double get_energy(double temperature, double f){ */
/* /\* temperature is defined by T (K) *\/ */
/* /\* 'f' must be given in eV. *\/ */
/* return f / (exp(f / (KB * temperature)) - 1); */
/* } */

/* Find the permutation such that rot_pos[rot_atom-inverse] matches pos
 * within symprec (distances measured in Cartesian via `lat`, with
 * periodic wrapping of fractional differences).
 * Writes rot_atom[j] = i when rot_pos[j] matches pos[i].
 * Returns 1 on success, 0 if any position is left unmatched. */
static int compute_permutation(int *rot_atom,
                               PHPYCONST double lat[3][3],
                               PHPYCONST double (*pos)[3],
                               PHPYCONST double (*rot_pos)[3],
                               const int num_pos,
                               const double symprec)
{
    int i, j, k, l;
    int search_start;
    double distance2, diff_cart;
    double diff[3];

    for (i = 0; i < num_pos; i++) {
        rot_atom[i] = -1;
    }

    /* optimization: Iterate primarily by pos instead of rot_pos. */
    /* (find where 0 belongs in rot_atom, then where 1 belongs, etc.) */
    /* Then track the first unassigned index. */
    /* */
    /* This works best if the permutation is close to the identity. */
    /* (more specifically, if the max value of 'rot_atom[i] - i' is small) */
    search_start = 0;
    for (i = 0; i < num_pos; i++) {
        while (rot_atom[search_start] >= 0) {
            search_start++;
        }
        for (j = search_start; j < num_pos; j++) {
            if (rot_atom[j] >= 0) {
                continue;
            }
            /* Fractional difference, wrapped to the nearest image. */
            for (k = 0; k < 3; k++) {
                diff[k] = pos[i][k] - rot_pos[j][k];
                diff[k] -= nint(diff[k]);
            }
            /* Cartesian distance via the lattice matrix. */
            distance2 = 0;
            for (k = 0; k < 3; k++) {
                diff_cart = 0;
                for (l = 0; l < 3; l++) {
                    diff_cart += lat[k][l] * diff[l];
                }
                distance2 += diff_cart * diff_cart;
            }
            if (sqrt(distance2) < symprec) {
                rot_atom[j] = i;
                break;
            }
        }
    }

    for (i = 0; i < num_pos; i++) {
        if (rot_atom[i] < 0) {
            printf("Encounter some problem in compute_permutation.\n");
            return 0;
        }
    }
    return 1;
}

/* Implementation detail of get_smallest_vectors. */
/* Finds the smallest vectors within each list and copies them to the output. */
static void gsv_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                      int *multiplicity,
                                      PHPYCONST double (*vector_lists)[27][3],
                                      PHPYCONST double (*length_lists)[27],
                                      const int num_lists,
                                      const double symprec)
{
    int i, j, k;
    int count;
    double minimum;
    double (*vectors)[3];
    double *lengths;

    for (i = 0; i < num_lists; i++) {
        /* Look at a single list of 27 vectors. */
        lengths = length_lists[i];
        vectors = vector_lists[i];

        /* Compute the minimum length. */
        minimum = DBL_MAX;
        for (j = 0; j < 27; j++) {
            if (lengths[j] < minimum) {
                minimum = lengths[j];
            }
        }

        /* Copy vectors whose length is within tolerance. */
        /* NOTE(review): this uses `<= symprec` while
           gsv_set_smallest_vectors uses `< symprec` — presumably
           equivalent in practice, but worth confirming. */
        count = 0;
        for (j = 0; j < 27; j++) {
            if (lengths[j] - minimum <= symprec) {
                for (k = 0; k < 3; k++) {
                    shortest_vectors[i][count][k] = vectors[j][k];
                }
                count++;
            }
        }
        multiplicity[i] = count;
    }
}

/* For every (pos_to, pos_from) pair, compute the 27 candidate vectors
 * through neighboring lattice points, measure their lengths in the
 * reduced basis, and store all vectors tied for the minimum length
 * (transformed to supercell coordinates via trans_mat) together with
 * their multiplicity. */
static void gsv_set_smallest_vectors(double (*smallest_vectors)[27][3],
                                     int *multiplicity,
                                     PHPYCONST double (*pos_to)[3],
                                     const int num_pos_to,
                                     PHPYCONST double (*pos_from)[3],
                                     const int num_pos_from,
                                     PHPYCONST int lattice_points[27][3],
                                     PHPYCONST double reduced_basis[3][3],
                                     PHPYCONST int trans_mat[3][3],
                                     const double symprec)
{
    int i, j, k, l, count;
    double length_tmp, minimum, vec_xyz;
    double length[27], vec[27][3];

    for (i = 0; i < num_pos_to; i++) {
        for (j = 0; j < num_pos_from; j++) {
            /* Candidate vectors through the 27 neighboring cells and
               their lengths in the reduced basis. */
            for (k = 0; k < 27; k++) {
                length[k] = 0;
                for (l = 0; l < 3; l++) {
                    vec[k][l] = pos_to[i][l] - pos_from[j][l] +
                        lattice_points[k][l];
                }
                for (l = 0; l < 3; l++) {
                    length_tmp = (reduced_basis[l][0] * vec[k][0] +
                                  reduced_basis[l][1] * vec[k][1] +
                                  reduced_basis[l][2] * vec[k][2]);
                    length[k] += length_tmp * length_tmp;
                }
                length[k] = sqrt(length[k]);
            }

            minimum = DBL_MAX;
            for (k = 0; k < 27; k++) {
                if (length[k] < minimum) {
                    minimum = length[k];
                }
            }

            /* Keep every vector within symprec of the minimum. */
            count = 0;
            for (k = 0; k < 27; k++) {
                if (length[k] - minimum < symprec) {
                    for (l = 0; l < 3; l++) {
                        /* Transform to supercell coordinates */
                        vec_xyz = (trans_mat[l][0] * vec[k][0] +
                                   trans_mat[l][1] * vec[k][1] +
                                   trans_mat[l][2] * vec[k][2]);
                        smallest_vectors[i * num_pos_from + j][count][l] =
                            vec_xyz;
                    }
                    count++;
                }
            }
            multiplicity[i * num_pos_from + j] = count;
        }
    }
}

/* Distribute fc2 rows for all atoms in atom_list from symmetry-equivalent
 * already-computed rows, using precomputed mappings (definition continues
 * beyond this chunk). */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int *atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int *permutations, /* shape[n_rot][n_pos] */
                           const int *map_atoms, /* shape [n_pos] */
                           const int *map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
    int i, j, k, l, m;
    int atom_todo, atom_done, atom_other;
    int sym_index;
    int *atom_list_reverse;
    double (*fc2_done)[3];
    double (*fc2_todo)[3];
double (*r_cart)[3]; const int *permutation; atom_list_reverse = NULL; atom_list_reverse = (int *)malloc(sizeof(int) * num_pos); /* atom_list_reverse[!atom_done] is undefined. */ for (i = 0; i < len_atom_list; i++) { atom_done = map_atoms[atom_list[i]]; if (atom_done == atom_list[i]) { atom_list_reverse[atom_done] = i; } } for (i = 0; i < len_atom_list; i++) { /* look up how this atom maps into the done list. */ atom_todo = atom_list[i]; atom_done = map_atoms[atom_todo]; sym_index = map_syms[atom_todo]; /* skip the atoms in the done list, */ /* which are easily identified because they map to themselves. */ if (atom_todo == atom_done) { continue; } /* look up information about the rotation */ r_cart = r_carts[sym_index]; permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */ /* distribute terms from atom_done to atom_todo */ for (atom_other = 0; atom_other < num_pos; atom_other++) { fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]]; fc2_todo = fc2[i * num_pos + atom_other]; for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { /* P' = R^-1 P R */ fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m]; } } } } } } free(atom_list_reverse); atom_list_reverse = NULL; } static void set_index_permutation_symmetry_fc(double *fc, const int natom) { int i, j, k, l, m, n; for (i = 0; i < natom; i++) { /* non diagonal part */ for (j = i + 1; j < natom; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { m = i * natom * 9 + j * 9 + k * 3 + l; n = j * natom * 9 + i * 9 + l * 3 + k; fc[m] += fc[n]; fc[m] /= 2; fc[n] = fc[m]; } } } /* diagnoal part */ for (k = 0; k < 2; k++) { for (l = k + 1; l < 3; l++) { m = i * natom * 9 + i * 9 + k * 3 + l; n = i * natom * 9 + i * 9 + l * 3 + k; fc[m] += fc[n]; fc[m] /= 2; fc[n] = fc[m]; } } } } static void set_translational_symmetry_fc(double *fc, const int natom) { int i, j, k, l, m; double sums[3][3]; for (i = 0; i < natom; i++) { 
for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sums[k][l] = 0; m = i * natom * 9 + k * 3 + l; for (j = 0; j < natom; j++) { if (i != j) { sums[k][l] += fc[m]; } m += 9; } } } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { fc[i * natom * 9 + i * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2; } } } } static void set_index_permutation_symmetry_compact_fc(double *fc, const int p2s[], const int s2pp[], const int nsym_list[], const int perms[], const int n_satom, const int n_patom, const int is_transpose) { int i, j, k, l, m, n, i_p, j_p, i_trans; double fc_elem; char *done; done = NULL; done = (char *)malloc(sizeof(char) * n_satom * n_patom); for (i = 0; i < n_satom * n_patom; i++) { done[i] = 0; } for (j = 0; j < n_satom; j++) { j_p = s2pp[j]; for (i_p = 0; i_p < n_patom; i_p++) { i = p2s[i_p]; if (i == j) { /* diagnoal part */ for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { if (l > k) { m = i_p * n_satom * 9 + i * 9 + k * 3 + l; n = i_p * n_satom * 9 + i * 9 + l * 3 + k; if (is_transpose) { fc_elem = fc[m]; fc[m] = fc[n]; fc[n] = fc_elem; } else { fc[m] = (fc[m] + fc[n]) / 2; fc[n] = fc[m]; } } } } } if (!done[i_p * n_satom + j]) { /* (j, i) -- nsym_list[j] --> (j', i') */ /* nsym_list[j] translates j to j' where j' is in */ /* primitive cell. The same translation sends i to i' */ /* where i' is not necessarily to be in primitive cell. 
*/ /* Thus, i' = perms[nsym_list[j] * n_satom + i] */ i_trans = perms[nsym_list[j] * n_satom + i]; done[i_p * n_satom + j] = 1; done[j_p * n_satom + i_trans] = 1; for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { m = i_p * n_satom * 9 + j * 9 + k * 3 + l; n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k; if (is_transpose) { fc_elem = fc[m]; fc[m] = fc[n]; fc[n] = fc_elem; } else { fc[m] = (fc[n] + fc[m]) / 2; fc[n] = fc[m]; } } } } } } free(done); done = NULL; } static void set_translational_symmetry_compact_fc(double *fc, const int p2s[], const int n_satom, const int n_patom) { int j, k, l, m, i_p; double sums[3][3]; for (i_p = 0; i_p < n_patom; i_p++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { sums[k][l] = 0; m = i_p * n_satom * 9 + k * 3 + l; for (j = 0; j < n_satom; j++) { if (p2s[i_p] != j) { sums[k][l] += fc[m]; } m += 9; } } } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] = -(sums[k][l] + sums[l][k]) / 2; } } } } static int nint(const double a) { if (a < 0.0) return (int)(a - 0.5); else return (int)(a + 0.5); }
threading_utils.h
/*! * Copyright 2015-2019 by Contributors * \file common.h * \brief Threading utilities */ #ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to 
get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template<typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException omp_exc; #pragma omp parallel num_threads(nthreads) { omp_exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } omp_exc.Rethrow(); } template <typename Func> void ParallelFor(size_t size, size_t nthreads, Func fn) { dmlc::OMPException 
omp_exc; #pragma omp parallel for num_threads(nthreads) for (omp_ulong i = 0; i < size; ++i) { omp_exc.Run(fn, i); } omp_exc.Rethrow(); } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced 
blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template<typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException omp_exc; omp_exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); omp_exc.Rethrow(); } template <typename Func> void ParallelFor(size_t size, size_t nthreads, Func fn) { dmlc::OMPException omp_exc; for (omp_ulong i = 0; i < size; ++i) { omp_exc.Run(fn, i); } omp_exc.Rethrow(); } } // namespace common } // namespace xgboost 
#endif // XGBOOST_COMMON_THREADING_UTILS_H_
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced 
blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template<typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException omp_exc; #pragma omp parallel num_threads(nthreads) { omp_exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } omp_exc.Rethrow(); } template <typename Func> void ParallelFor(size_t size, size_t nthreads, Func fn) { dmlc::OMPException omp_exc; #pragma omp parallel for num_threads(nthreads) for (omp_ulong i = 0; i < size; 
++i) { omp_exc.Run(fn, i); } omp_exc.Rethrow(); } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
test.c
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu #define IN_PARALLEL 0 #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #define N 1000 /* * Test if it is possible to: * 1. target enter data to depend 'in' and 'out' * 2. target exit data to depend 'in' and 'out' * 3. Mix target-based tasks with host tasks. */ int main(){ int errors = 0; bool isHost = true; double sum = 0.0; double* h_array = (double *) malloc(N * sizeof(double)); double* in_1 = (double *) malloc(N * sizeof(double)); double* in_2 = (double *) malloc(N * sizeof(double)); #if IN_PARALLEL #pragma omp parallel { #pragma omp master { #endif // host task #pragma omp task depend(out: in_1) shared(in_1) { for (int i = 0; i < N; ++i) { in_1[i] = 1; } } // host task #pragma omp task depend(out: in_2) shared(in_2) { for (int i = 0; i < N; ++i) { in_2[i] = 2; } } // target enter data #pragma omp target enter data nowait map(alloc: h_array[0:N]) map(to: in_1[0:N]) map(to: in_2[0:N]) depend(out: h_array) depend(in: in_1) depend(in: in_2) // target task to compute on the device #pragma omp target nowait map(tofrom: isHost) depend(inout: h_array) { isHost = omp_is_initial_device(); for (int i = 0; i < N; ++i) { h_array[i] = in_1[i]*in_2[i]; } } // target exit data #pragma omp target exit data nowait map(from: h_array[0:N]) depend(inout: h_array) // host task #pragma omp task depend(in: h_array) shared(sum, h_array) { // checking results for (int i = 0; i < N; ++i) { sum += h_array[i]; } } #if IN_PARALLEL } // master } // parallel #else #pragma omp taskwait #endif errors = 2.0*N != sum; if (!errors) printf("Test passed\n"); else printf("Test failed on %s: sum = %g\n", (isHost ? "host" : "device"), sum); return errors; }
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu #define IN_PARALLEL 0 #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #define N 1000 /* * Test if it is possible to: * 1. target enter data to depend 'in' and 'out' * 2. target exit data to depend 'in' and 'out' * 3. Mix target-based tasks with host tasks. */ int main(){ int errors = 0; bool isHost = true; double sum = 0.0; double* h_array = (double *) malloc(N * sizeof(double)); double* in_1 = (double *) malloc(N * sizeof(double)); double* in_2 = (double *) malloc(N * sizeof(double)); #if IN_PARALLEL #pragma omp master { #endif // host task #pragma omp task depend(out: in_1) shared(in_1) { for (int i = 0; i < N; ++i) { in_1[i] = 1; } } // host task for (int i = 0; i < N; ++i) { in_2[i] = 2; } // target enter data // target task to compute on the device isHost = omp_is_initial_device(); for (int i = 0; i < N; ++i) { h_array[i] = in_1[i]*in_2[i]; } // target exit data // host task // checking results for (int i = 0; i < N; ++i) { sum += h_array[i]; } #if IN_PARALLEL } // master // parallel #else #endif errors = 2.0*N != sum; if (!errors) printf("Test passed\n"); else printf("Test failed on %s: sum = %g\n", (isHost ? "host" : "device"), sum); return errors; }
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu #define IN_PARALLEL 0 #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #define N 1000 /* * Test if it is possible to: * 1. target enter data to depend 'in' and 'out' * 2. target exit data to depend 'in' and 'out' * 3. Mix target-based tasks with host tasks. */ int main(){ int errors = 0; bool isHost = true; double sum = 0.0; double* h_array = (double *) malloc(N * sizeof(double)); double* in_1 = (double *) malloc(N * sizeof(double)); double* in_2 = (double *) malloc(N * sizeof(double)); #if IN_PARALLEL #pragma omp parallel { #pragma omp master { #endif // host task #pragma omp task depend(out: in_1) shared(in_1) { for (int i = 0; i < N; ++i) { in_1[i] = 1; } } // host task #pragma omp task depend(out: in_2) shared(in_2) { for (int i = 0; i < N; ++i) { in_2[i] = 2; } } // target enter data #pragma omp target enter data nowait map(alloc: h_array[0:N]) map(to: in_1[0:N]) map(to: in_2[0:N]) depend(out: h_array) depend(in: in_1) depend(in: in_2) // target task to compute on the device #pragma omp target nowait map(tofrom: isHost) depend(inout: h_array) { isHost = omp_is_initial_device(); for (int i = 0; i < N; ++i) { h_array[i] = in_1[i]*in_2[i]; } } // target exit data #pragma omp target exit data nowait map(from: h_array[0:N]) depend(inout: h_array) // host task #pragma omp task depend(in: h_array) shared(sum, h_array) { // checking results for (int i = 0; i < N; ++i) { sum += h_array[i]; } } #if IN_PARALLEL } // master } // parallel #else #pragma omp taskwait #endif errors = 2.0*N != sum; if (!errors) printf("Test passed\n"); else printf("Test failed on %s: sum = %g\n", (isHost ? "host" : "device"), sum); return errors; }
format_trans.h
#ifndef FORMAT_TRANS #define FORMAT_TRANS #include"common.h" #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" //calculate the number of non-empty tiles of matrix A void step1_kernel(Beidou_Tile_Matrix *matrix) // (int m, int n, MAT_PTR_TYPE *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *numtile) { int *rowpointer=matrix->rowpointer; int m = matrix->m; int n = matrix->n; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); printf("threads=%i\n",thread); char *flag_g=(char *)malloc(thread*tilen * sizeof(char)); #pragma omp parallel for for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); // printf("id =%d\n",thread_id); char *flag = flag_g+ thread_id * tilen; memset(flag,0,tilen * sizeof(char)); int start = blki *BLOCK_SIZE; int end = blki == tilem-1 ? m : (blki+1)* BLOCK_SIZE ; for (int j = rowpointer[start]; j < rowpointer[end]; j ++) { int jc = columnidx[j] / BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[blki]++; } } // free(flag); } free(flag_g); } void step2_kernel(Beidou_Tile_Matrix *matrix) // (int rowA, int coA, int *rowpointerA, int *columnindexA, // int tilem, int tilenA, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int numtileA) { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->columnidx; int colid =0; char *flag=(char *)malloc(tilen * sizeof(char)); for (int i=0;i<tilem;i++) { memset(flag,0,tilen*sizeof(char)); int start= i*BLOCK_SIZE; int end = i== tilem-1 ? 
m : (i+1)*BLOCK_SIZE ; for (int j=rowpointer[start];j< rowpointer[end];j++) { int jc=columnidx[j]/BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[i+1]++; tile_columnidx[colid]=jc; colid++; } } } for (int i=1;i<tilem+1;i++) { tile_ptr[i] += tile_ptr[i-1]; } } //determine the tile structure (tileptr , tile columnidx and tile_nnz) of matrix A. void step2_kernel_new (Beidou_Tile_Matrix *matrix, unsigned char *tile_csr_ptr) // (int m, int n, int *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int *tile_nnz, // unsigned char *tile_csr_ptr, int numtile) { int m = matrix->m; int n = matrix->n; int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = matrix->tile_nnz; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); char *col_temp_g=(char *)malloc((thread * tilen) * sizeof(char)); int *nnz_temp_g=(int *)malloc((thread * tilen) * sizeof(int)); unsigned char *ptr_per_tile_g = (unsigned char *)malloc((thread * tilen * BLOCK_SIZE) * sizeof(unsigned char)); #pragma omp parallel for for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); char *col_temp = col_temp_g + thread_id * tilen; memset(col_temp,0,tilen * sizeof(char)); int *nnz_temp = nnz_temp_g + thread_id * tilen; memset(nnz_temp,0,tilen * sizeof(int)); unsigned char *ptr_per_tile = ptr_per_tile_g + thread_id * tilen * BLOCK_SIZE; memset(ptr_per_tile, 0, tilen * BLOCK_SIZE * sizeof(unsigned char)); int pre_tile = tile_ptr[blki]; int rowlen = blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; int start= blki * BLOCK_SIZE; int end = blki==tilem-1 ? 
m : (blki +1)*BLOCK_SIZE ;
        /* Single scan over the nonzeros of this block row: mark which column
           tiles are occupied, count nnz per column tile, and accumulate a
           per-row nonzero count (CSR-style row histogram) for each tile. */
        for (int ri=0 ; ri < rowlen ; ri ++)
        {
            for (int j=rowpointer[start + ri];j<rowpointer[start + ri +1];j++)
            {
                int jc = columnidx[j]/BLOCK_SIZE;
                col_temp[jc] = 1;
                nnz_temp[jc] ++;
                ptr_per_tile[jc * BLOCK_SIZE + ri] ++;
            }
        }
        /* Compact the occupied column tiles (ascending column order) into the
           global per-tile metadata, starting at this block row's base offset. */
        int count =0;
        for (int blkj=0 ;blkj < tilen; blkj++)
        {
            if (col_temp[blkj] == 1)
            {
                tile_columnidx[pre_tile + count] = blkj;
                tile_nnz[pre_tile + count] = nnz_temp[blkj];
                for (int ri =0; ri < rowlen ; ri ++)
                {
                    tile_csr_ptr[(pre_tile + count) * BLOCK_SIZE + ri] = ptr_per_tile[blkj * BLOCK_SIZE + ri];
                }
                count ++;
            }
        }
    }
    free(col_temp_g);
    free(nnz_temp_g);
    free(ptr_per_tile_g);
}

/* Step 3: choose a storage format for every tile.
   Format codes: 0=CSR, 1=COO, 2=ELL, 3=HYB, 4=Dense, 5=DenseRow, 6=DenseCol.
   For each tile this records its space cost in blknnz[] and the per-format
   size in the corresponding *_offset[] array (prefix-summed by the caller).
   new_coocount[] collects, per tile, how many entries will be extracted into
   the separate global COO structure (used by the SpMV path). */
void step3_kernel_new(Beidou_Tile_Matrix *matrix, int *new_coocount)
{
    int *rowpointer=matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz = matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned char *tile_csr_ptr = matrix->csr_ptr;
    int *hyb_coocount = matrix->hyb_coocount;
    int *csr_offset = matrix->csr_offset;
    int *csrptr_offset = matrix->csrptr_offset;
    int *coo_offset = matrix->coo_offset;
    int *ell_offset = matrix->ell_offset;
    int *hyb_offset = matrix->hyb_offset;
    int *dns_offset = matrix->dns_offset;
    int *dnsrow_offset = matrix->dnsrow_offset;
    int *dnscol_offset = matrix->dnscol_offset;

    /* Tiles are independent, so block rows can be classified in parallel;
       each iteration writes only its own tiles' slots. */
    #pragma omp parallel for
    for (int blki=0;blki<tilem;blki++)
    {
        int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki];
        /* The last block row/column may be ragged (shorter than BLOCK_SIZE). */
        int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        for (int bi=0;bi<tilenum_per_row;bi++)
        {
            int collen = tile_columnidx[tile_ptr[blki]+bi] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
            int tile_id = tile_ptr[blki]+bi;
            int tilennz = tile_nnz[tile_id +1] - tile_nnz[tile_id];
            /* Density threshold: half of the tile's capacity. */
            int nnzthreshold = rowlen * collen * 0.5 ;
            if (tilennz >= nnzthreshold) //if the number of nnz is more than 128, then dense
            {
                Format[tile_id] = 4 ;
                blknnz[tile_id] = rowlen * collen;
                dns_offset[tile_id] = rowlen * collen;
                continue;
            }
            if (tilennz <= COO_THRESHOLD) //else if the number of nnz is less than 12, then coo
            {
                Format[tile_id] = 1 ;
                blknnz[tile_id] = tilennz;
                coo_offset[tile_id] = tilennz;
                new_coocount[tile_id] = tilennz;
                continue;
            }
            else if (tilennz % collen ==0 || tilennz % rowlen ==0)
            {
                /* nnz divisible by a tile dimension: the tile might consist of
                   a few completely full rows (DenseRow) or columns (DenseCol). */
                int dnsrowflag =0 ;
                int numdnsrow =0;
                int dnscolflag =0;
                int numdnscol =0;
                /* Every non-empty row must be completely full for DenseRow. */
                for (int ri=0;ri < rowlen ;ri++)
                {
                    if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] % collen !=0)
                    {
                        dnsrowflag =0;
                        break;
                    }
                    else
                    {
                        if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] == collen)
                        {
                            dnsrowflag =1;
                            numdnsrow ++ ;
                        }
                    }
                }
                if (dnsrowflag == 1)
                {
                    Format[tile_id] = 5 ; //Dense Row
                    denserowptr[tile_id] = numdnsrow ;
                    blknnz[tile_id] = numdnsrow * collen;
                    dnsrow_offset[tile_id] = numdnsrow * collen;
                    continue;
                }
                else
                {
                    /* DenseCol check: build a per-column occupancy count for
                       this tile by rescanning the block row's nonzeros. */
                    int start = blki*BLOCK_SIZE;
                    int end = blki==tilem-1 ? m : (blki+1)*BLOCK_SIZE ;
                    int jc = tile_columnidx[tile_id];
                    /* NOTE(review): dnscol_colidx_temp is allocated/initialized
                       but never used below, and neither it nor col_flag is
                       freed — this leaks per tile reaching this branch. */
                    unsigned char *dnscol_colidx_temp= (unsigned char *)malloc(tilennz * sizeof(unsigned char));
                    memset(dnscol_colidx_temp, -1, tilennz * sizeof(unsigned char));
                    unsigned char *col_flag =(unsigned char *)malloc(collen * sizeof(unsigned char));
                    memset(col_flag, 0, collen * sizeof(unsigned char));
                    for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj ++)
                    {
                        int jc_temp = columnidx[blkj]/BLOCK_SIZE;
                        if (jc_temp == jc)
                        {
                            int col_temp = columnidx[blkj] - jc * BLOCK_SIZE;
                            col_flag[col_temp] ++;
                        }
                    }
                    /* Every non-empty column must be completely full. */
                    for (int j =0; j < collen; j ++)
                    {
                        if (col_flag[j] % rowlen !=0)
                        {
                            dnscolflag =0;
                            break;
                        }
                        else
                        {
                            if (col_flag[j] == rowlen)
                            {
                                dnscolflag =1;
                                numdnscol ++ ;
                            }
                        }
                    }
                    if (dnscolflag == 1)
                    {
                        Format[tile_id] = 6 ; //Dense Col
                        densecolptr[tile_id] = numdnscol ;
                        blknnz[tile_id] = numdnscol * rowlen;
                        dnscol_offset[tile_id] = numdnscol * rowlen;
                        continue;
                    }
                }
            }
            /* Remaining tiles: pick CSR / ELL / HYB from the row-length
               distribution (coefficient of variation). */
            if (Format[tile_id] != 5 && Format[tile_id] !=6)
            {
                /* bwidth = longest row in the tile (ELL width). */
                int bwidth=0;
                for (int blkj=0;blkj<rowlen;blkj++)
                {
                    if (bwidth < tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] )
                        bwidth = tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] ;
                }
                double row_length_mean = ((double)tilennz) / rowlen;
                double variance = 0.0;
                double row_length_skewness = 0.0;
                for (int row = 0; row < rowlen; ++row)
                {
                    int length = tile_csr_ptr[tile_id * BLOCK_SIZE + row];
                    double delta = (double)(length - row_length_mean);
                    variance += (delta * delta);
                    row_length_skewness += (delta * delta * delta);
                }
                variance /= rowlen;
                double row_length_std_dev = sqrt(variance);
                /* row_length_skewness is computed but not used in the decision. */
                row_length_skewness = (row_length_skewness / rowlen) / pow(row_length_std_dev, 3.0);
                double row_length_variation = row_length_std_dev / row_length_mean;
                double ell_csr_threshold = 0.2;
                double csr_hyb_threshold = 1.0;
                if (row_length_variation <= ell_csr_threshold) // if variation is less than 0.2, then ELL
                {
                    Format[tile_id] = 2;
                    blkwidth[tile_id]=bwidth;
                    blknnz[tile_id] = bwidth * rowlen ;
                    ell_offset[tile_id] = bwidth * rowlen;
                }
                else
                {
                    /* Search the ELL width that minimizes HYB storage: shrink
                       the ELL part while the spilled-COO cost keeps the total
                       byte size decreasing. */
                    int hybwidth=bwidth;
                    int iopriorsize= bwidth * rowlen * sizeof (MAT_VAL_TYPE) + bwidth * rowlen * sizeof (unsigned char) ;
                    int ionextsize;
                    int coonextnum=0;
                    int coopriornum=0;
                    for (int wi=bwidth-1;wi>0;wi--)
                    {
                        /* Entries beyond width wi spill into the COO part. */
                        coonextnum=0;
                        for (int blkj=0;blkj<rowlen;blkj++)
                        {
                            if ( tile_csr_ptr[tile_id * BLOCK_SIZE + blkj]> wi)
                            {
                                coonextnum += tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] - wi ;
                            }
                        }
                        ionextsize= wi * rowlen * sizeof (MAT_VAL_TYPE )+ wi * rowlen * sizeof (unsigned char) + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (unsigned char)) ;
                        if (iopriorsize<=ionextsize)
                        {
                            hybwidth=wi+1;
                            break;
                        }
                        else
                        {
                            hybwidth = wi;
                            iopriorsize=ionextsize;
                            coopriornum=coonextnum;
                        }
                    }
                    if (row_length_variation >= csr_hyb_threshold )//&& coopriornum <= 4) // if variation > 1.0, and the number of coo data <=4, then HYB
                    {
                        Format[tile_id] = 3;
                        hyb_coocount[tile_id] = coopriornum;
                        blkwidth[tile_id]=hybwidth;
                        blknnz[tile_id] = coopriornum + hybwidth * rowlen ;
                        hyb_offset[tile_id] = coopriornum + hybwidth * rowlen;
                        new_coocount[tile_id] = coopriornum;
                    }
                    else //else CSR
                    {
                        Format[tile_id] =0 ;
                        blknnz[tile_id] = tilennz ;
                        csr_offset[tile_id] = tilennz;
                        csrptr_offset[tile_id] = BLOCK_SIZE;
                    }
                }
            }
        }
    }
}

/* Step 4: fill the per-format value/index arrays for every tile, using the
   offsets produced by step 3 (already prefix-summed by the caller).  Also
   builds the per-row 16-bit occupancy masks, and — on the SpMV path — copies
   COO-resident entries into the global new_coo_* arrays. */
void step4_kernel(Beidou_Tile_Matrix *matrix, unsigned char *csr_ptr, int *hyb_coocount, int nnz_temp, int tile_count_temp,
                  int *csr_offset, int *csrptr_offset, int *coo_offset, int *ell_offset, int *hyb_offset,
                  int *dns_offset, int *dnsrow_offset, int *dnscol_offset,
                  MAT_VAL_TYPE *new_coo_value, int *new_coo_colidx, int *new_coo_rowidx, int *new_coocount)
{
    int *rowpointer=matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    MAT_VAL_TYPE *value = matrix->value;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz =
matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val;
    unsigned char *Tile_csr_Col = matrix->Tile_csr_Col;
    unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr;
    MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val;
    unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx;
    unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx;
    MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val;
    unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx;
    MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val;
    unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx;
    unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx;
    MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val;
    MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val;
    char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx;
    MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val;
    char *Tile_dnscol_idx = matrix->Tile_dnscol_idx;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned short *mask = matrix->mask;

    /* Per-thread scratch: each thread gets a slice sized for the largest block
       row (nnz_temp nonzeros, tile_count_temp tiles), so no synchronization is
       needed inside the parallel loop. */
    unsigned thread = omp_get_max_threads();
    unsigned char *csr_colidx_temp_g=(unsigned char*)malloc((thread * nnz_temp )*sizeof(unsigned char));
    MAT_VAL_TYPE *csr_val_temp_g=(MAT_VAL_TYPE*)malloc((thread * nnz_temp)*sizeof(MAT_VAL_TYPE));
    int *tile_count_g = (int *)malloc(thread * tile_count_temp * sizeof(int));

    //for each tile
    #pragma omp parallel for
    for (int blki=0;blki<tilem;blki++)
    {
        int thread_id = omp_get_thread_num();
        unsigned char *csr_colidx_temp = csr_colidx_temp_g + thread_id * nnz_temp;
        MAT_VAL_TYPE *csr_val_temp = csr_val_temp_g + thread_id * nnz_temp;
        int *tile_count = tile_count_g + thread_id * tile_count_temp;
        memset(csr_colidx_temp, 0, (nnz_temp)*sizeof(unsigned char));
        memset(csr_val_temp, 0, (nnz_temp)*sizeof(MAT_VAL_TYPE));
        memset(tile_count, 0, (tile_count_temp)*sizeof(int));

        int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki];
        int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        int start = blki*BLOCK_SIZE;
        int end = blki==tilem-1 ? m : (blki+1)*BLOCK_SIZE ;

        /* Bucket this block row's nonzeros per tile (values + tile-local
           column indices), in row-major order, into the scratch buffers.
           Linear search over the row's tiles to find the matching column. */
        for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj ++)
        {
            int jc_temp = columnidx[blkj]/BLOCK_SIZE;
            for (int bi = 0; bi < tilenum_per_row; bi ++)
            {
                int tile_id = tile_ptr[blki]+bi;
                int jc = tile_columnidx[tile_id];
                int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
                if (jc == jc_temp)
                {
                    csr_val_temp[pre_nnz + tile_count[bi]] = value[blkj];
                    csr_colidx_temp[pre_nnz + tile_count[bi]] = columnidx[blkj] - jc * BLOCK_SIZE;
                    tile_count[bi] ++;
                    break;
                }
            }
        }

        /* Second pass: emit each tile in its chosen format. */
        for (int bi = 0; bi < tilenum_per_row; bi ++)
        {
            int tile_id = tile_ptr[blki]+bi;
            int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
            int tilennz = tile_nnz[tile_id +1] - tile_nnz[tile_id]; //blknnz[tile_id+1] - blknnz[tile_id] ;
            int collen = tile_columnidx[tile_id] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
            int format = Format[tile_id];
            switch (format)
            {
                case 0: /* CSR */
                {
                    int offset = csr_offset[tile_id];
                    int ptr_offset = csrptr_offset[tile_id];
                    /* csr_ptr holds per-row counts; turn them into row starts. */
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        int start = ptr_temp[ri]; /* NOTE(review): shadows the outer 'start' */
                        int stop = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];; /* stray empty statement kept as-is */
                        for (int k =start; k < stop; k ++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz + k];
                            Tile_csr_Val[offset + k] = csr_val_temp[pre_nnz + k];
                            Tile_csr_Col[offset + k] = csr_colidx_temp[pre_nnz + k];
                            /* Set the occupancy bit, MSB = column 0. */
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                        Tile_csr_Ptr[ptr_offset+ ri] = ptr_temp[ri];
                    }
                    break;
                }
                case 1: /* COO */
                {
                    if(SPMV && !SPGEMM)
                    {
                        /* SpMV path: emit into the global COO arrays with
                           matrix-global row/column indices. */
                        int colidx_temp = tile_columnidx[tile_id];
                        int offset_new = new_coocount[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                new_coo_rowidx[offset_new + j] = ri + blki * BLOCK_SIZE;
                                new_coo_value[offset_new + j] = csr_val_temp[pre_nnz + j] ;
                                new_coo_colidx[offset_new + j]=csr_colidx_temp[pre_nnz + j] + colidx_temp * BLOCK_SIZE;
                            }
                        }
                    }
                    if(SPGEMM && !SPMV)
                    {
                        /* SpGEMM path: keep the tile-local COO representation. */
                        int offset = coo_offset[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        /* NOTE(review): scans rowlen here but BLOCK_SIZE in the
                           other branches — confirm this is intentional. */
                        exclusive_scan_char(ptr_temp, rowlen);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];
                            for (int j = ptr_temp[ ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz + j];
                                Tile_coo_rowIdx[offset+ j] = ri;
                                Tile_coo_Val[offset + j] = csr_val_temp[pre_nnz + j] ;
                                Tile_coo_colIdx[offset + j]=csr_colidx_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 2: /* ELL: column-major slots, rowlen stride per slot */
                {
                    int offset = ell_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            int temp = j - ptr_temp[ri]; /* slot index within the row */
                            Tile_ell_colIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_ell_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                    }
                    /* Build masks from the padded ELL slots (padding repeats a
                       column index, so the OR is idempotent). */
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi ++)
                        {
                            int colidx = Tile_ell_colIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 3: /* HYB: ELL part of width blkwidth + COO spill */
                {
                    int colidx_temp = tile_columnidx[tile_id];
                    int offset = hyb_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int offset_new = new_coocount[tile_id];
                    int coocount=0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        /* First blkwidth entries of the row go to the ELL part. */
                        int stop= (nnz_end- ptr_temp[ri]) <= blkwidth[tile_id] ? nnz_end : ptr_temp[ri] + blkwidth[tile_id] ;
                        for (int j = ptr_temp[ri]; j < stop; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            int temp = j - ptr_temp[ri];
                            Tile_hyb_ellcolIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_hyb_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                        if (SPGEMM && !SPMV)
                        {
                            /* Spill the remainder into the tile-local COO tail
                               appended after the ELL part. */
                            for (int k=stop; k< nnz_end; k++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz +k];
                                Tile_hyb_Val[offset + blkwidth[tile_id] * rowlen + coocount] = csr_val_temp[pre_nnz +k];
                                Tile_hyb_ellcolIdx[offset + blkwidth[tile_id] * rowlen + coocount] = csr_colidx_temp[pre_nnz +k];
                                Tile_hyb_coorowIdx[hyb_coocount[tile_id] + coocount] = ri;
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                                coocount++;
                            }
                        }
                        if(SPMV && !SPGEMM)
                        {
                            /* Spill into the global COO arrays instead. */
                            for (int k=stop; k< nnz_end; k++)
                            {
                                new_coo_value[offset_new + coocount] = csr_val_temp[pre_nnz +k];
                                new_coo_colidx[offset_new+coocount] = csr_colidx_temp[pre_nnz +k] + colidx_temp * BLOCK_SIZE;
                                new_coo_rowidx[offset_new+coocount] = ri + blki * BLOCK_SIZE;
                                coocount++;
                            }
                        }
                    }
                    /* Masks from the ELL part (spill entries handled above). */
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi ++)
                        {
                            int colidx = Tile_hyb_ellcolIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 4: /* Dense: full column-major tile */
                {
                    int offset = dns_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                            Tile_dns_Val[offset + csr_colidx_temp[pre_nnz + j] * rowlen +ri] = csr_val_temp[pre_nnz + j];
                        }
                    }
                    /* Dense tile: every mask bit is set. */
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for(int j =0; j < BLOCK_SIZE; j ++)
                        {
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - j - 1));
                        }
                    }
                    break;
                }
                case 5: /* DenseRow: only completely-full rows are stored */
                {
                    int offset = dnsrow_offset[tile_id];
                    int rowoffset = denserowptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int dnsriid=0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        if (nnz_end - ptr_temp[ri] == collen) /* row is full */
                        {
                            Tile_dnsrow_idx[rowoffset + dnsriid]=ri;
                            dnsriid ++;
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                                Tile_dnsrow_Val[offset + j] = csr_val_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 6: /* DenseCol: only completely-full columns are stored */
                {
                    int offset = dnscol_offset[tile_id];
                    int coloffset = densecolptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    /* The first row touches every dense column, so its column
                       indices enumerate the stored columns. */
                    int dnsciid=0;
                    for (int j=ptr_temp[0];j < ptr_temp[1];j ++)
                    {
                        int ci = csr_colidx_temp[pre_nnz + j];
                        Tile_dnscol_idx[coloffset + dnsciid] =ci ;
                        dnsciid++;
                    }
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            int temp = j - ptr_temp[ri]; /* dense-column slot */
                            unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                            Tile_dnscol_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz +j];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                default:
                    break;
            }
        }
    }
    free(csr_colidx_temp_g);
    free(csr_val_temp_g);
    free(tile_count_g);
}

/* Release every buffer owned by the tile matrix; freed pointers are reset to
   NULL to guard against double-free. */
void Tile_destroy(Beidou_Tile_Matrix *matrix)
{
    free(matrix->Tile_csr_Col);
    matrix->Tile_csr_Col = NULL;
    free(matrix->Tile_csr_Ptr);
    matrix->Tile_csr_Ptr = NULL;
    free(matrix->Tile_csr_Val);
    matrix->Tile_csr_Val = NULL;
    free(matrix->Tile_coo_colIdx);
    matrix->Tile_coo_colIdx = NULL;
    free(matrix->Tile_coo_rowIdx);
    matrix->Tile_coo_rowIdx = NULL;
    free(matrix->Tile_coo_Val);
    matrix->Tile_coo_Val = NULL;
    free(matrix->Tile_ell_colIdx);
    matrix->Tile_ell_colIdx = NULL;
    free(matrix->Tile_ell_Val);
    matrix->Tile_ell_Val = NULL;
    free(matrix->Tile_hyb_coorowIdx);
    matrix->Tile_hyb_coorowIdx = NULL;
    free(matrix->Tile_hyb_ellcolIdx);
    matrix->Tile_hyb_ellcolIdx = NULL;
    free(matrix->Tile_hyb_Val);
    matrix->Tile_hyb_Val = NULL;
    free(matrix->Tile_dns_Val);
    matrix->Tile_dns_Val = NULL;
    free(matrix->Tile_dnsrow_idx);
    matrix->Tile_dnsrow_idx = NULL;
    free(matrix->Tile_dnsrow_Val);
    matrix->Tile_dnsrow_Val = NULL;
    free(matrix->Tile_dnscol_Val);
    matrix->Tile_dnscol_Val = NULL;
    free(matrix->Tile_dnscol_idx);
    matrix->Tile_dnscol_idx = NULL;
    free(matrix->densecolptr);
    matrix->densecolptr = NULL;
    free(matrix->denserowptr);
    matrix->denserowptr = NULL;
    free(matrix->blkwidth);
    matrix->blkwidth = NULL;
    free(matrix->tile_ptr);
    matrix->tile_ptr = NULL;
    free(matrix->tile_columnidx);
    matrix->tile_columnidx = NULL;
    free(matrix->tile_nnz);
    matrix->tile_nnz = NULL;
    free(matrix->blknnz);
    matrix->blknnz = NULL;
    free(matrix->value);
    matrix->value = NULL;
    free(matrix->columnidx);
    matrix->columnidx = NULL;
    free(matrix->coo_new_matrix_ptr);
    matrix->coo_new_matrix_ptr = NULL;
    free(matrix->coo_new_rowidx);
    matrix->coo_new_rowidx = NULL;
    free(matrix->coo_new_matrix_value);
    matrix->coo_new_matrix_value = NULL;
    free(matrix->coo_new_matrix_colidx);
    matrix->coo_new_matrix_colidx = NULL;
    /* NOTE(review): the remaining frees do not reset the pointers to NULL,
       unlike the ones above — consider making this consistent. */
    free(matrix->csr_ptr);
    free(matrix->csr_offset);
    free(matrix->csrptr_offset);
    free(matrix->coo_offset);
    free(matrix->ell_offset);
    free(matrix->hyb_offset);
    free(matrix->dns_offset);
    free(matrix->dnsrow_offset);
    free(matrix->dnscol_offset);
}

/* Driver: convert the CSR matrix inside 'matrix' into the tiled multi-format
   representation in four timed steps:
     1. count non-empty tiles per block row,
     2. build tile column indices / nnz counts / per-row histograms,
     3. choose a format per tile and size every per-format array,
     4. fill the per-format arrays.
   The caller receives ownership of the freshly allocated global COO arrays
   through the four out-parameters (value, colidx, rowidx, per-tile counts). */
void format_transform(Beidou_Tile_Matrix *matrix, MAT_VAL_TYPE **new_coo_value_temp, int **new_coo_colidx_temp, int **new_coo_rowidx_temp, int **new_coocount_temp)
{
    struct timeval t1, t2;

    gettimeofday(&t1, NULL);
    step1_kernel(matrix);
    gettimeofday(&t2, NULL);
    double time_step1 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step1 runtime = %4.5f ms\n", time_step1);

    /* Turn per-row tile counts into offsets; total tile count is the last slot. */
    exclusive_scan(matrix->tile_ptr, matrix->tilem+1);
    matrix->numtile = matrix->tile_ptr[matrix->tilem];
    printf("the number of tiles in matrix A= %d\n",matrix->numtile);

    matrix->tile_columnidx=(int *)malloc(matrix->numtile*sizeof(int));
    memset(matrix->tile_columnidx, 0, matrix->numtile*sizeof(int));
    matrix->tile_nnz = (int *)malloc((matrix->numtile + 1)* sizeof(int)); //real nnz of each sparse tile
    memset(matrix->tile_nnz,0,(matrix->numtile + 1) * sizeof(int));
    matrix->csr_ptr = (unsigned char *)malloc((matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char));
    memset (matrix->csr_ptr, 0, (matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char));

    gettimeofday(&t1, NULL);
    step2_kernel_new(matrix, matrix->csr_ptr);
    gettimeofday(&t2, NULL);
    double time_step2 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step2 runtime = %4.5f ms\n", time_step2);

    exclusive_scan(matrix->tile_nnz, matrix->numtile +1);

    //format 0-7 represent 7 formats: CSR, COO, ELL, HYB, Dns, DnsRow, DnsCol
    matrix->Format =(char *)malloc(matrix->numtile* sizeof(char));
    memset(matrix->Format,0,matrix->numtile * sizeof(char));
    matrix->blknnz = (int *)malloc((matrix->numtile + 1)* sizeof(int)); //space cost that need allocate
    memset(matrix->blknnz,0,(matrix->numtile + 1) * sizeof(int));

    //dense
    int dense_size=0;
    matrix->dns_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->dns_offset, 0, (matrix->numtile+1) * sizeof(int));
    //denserow
    matrix->denserowptr = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->denserowptr,0,(matrix->numtile+ 1) * sizeof(int));
    int denserow_size =0 ;
    matrix->dnsrow_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->dnsrow_offset, 0, (matrix->numtile+1) * sizeof(int));
    //densecolumn
    matrix->densecolptr = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->densecolptr,0,(matrix->numtile+ 1) * sizeof(int));
    int densecol_size =0 ;
    matrix->dnscol_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->dnscol_offset, 0, (matrix->numtile+1) * sizeof(int));
    //CSR
    int csrsize=0;
    matrix->csr_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->csr_offset, 0, (matrix->numtile+1) * sizeof(int));
    matrix->csrptr_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->csrptr_offset, 0, (matrix->numtile+1) * sizeof(int));
    //ELL
    int ellsize =0;
    matrix->ell_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->ell_offset, 0, (matrix->numtile+1) * sizeof(int));
    //COO
    int coosize =0;
    matrix->coo_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->coo_offset, 0, (matrix->numtile+1) * sizeof(int));
    //HYB
    int hybellsize =0;
    int hybcoosize =0;
    int hybsize =0;
    matrix->blkwidth = (char *)malloc(matrix->numtile*sizeof(char));
    memset(matrix->blkwidth,0,matrix->numtile * sizeof(char)) ;
    matrix->hyb_coocount= (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->hyb_coocount,0,(matrix->numtile + 1) * sizeof(int)) ;
    matrix->hyb_offset = (int *)malloc((matrix->numtile+1) * sizeof(int));
    memset(matrix->hyb_offset, 0, (matrix->numtile+1) * sizeof(int));

    *new_coocount_temp = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(*new_coocount_temp,0,(matrix->numtile + 1) * sizeof(int)) ;
    int *new_coocount = *new_coocount_temp;

    gettimeofday(&t1, NULL);
    step3_kernel_new(matrix, new_coocount);
    gettimeofday(&t2, NULL);
    double time_step3 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step3 runtime = %4.5f ms\n", time_step3);

    /* Prefix-sum every per-tile size array into offsets. */
    exclusive_scan(matrix->csr_offset, matrix->numtile +1);
    exclusive_scan(matrix->csrptr_offset, matrix->numtile +1);
    exclusive_scan(matrix->coo_offset, matrix->numtile +1);
    exclusive_scan(matrix->ell_offset, matrix->numtile +1);
    exclusive_scan(matrix->hyb_offset, matrix->numtile +1);
    exclusive_scan(matrix->dns_offset, matrix->numtile +1);
    exclusive_scan(matrix->dnsrow_offset, matrix->numtile +1);
    exclusive_scan(matrix->dnscol_offset, matrix->numtile +1);
    exclusive_scan(matrix->denserowptr,matrix->numtile+1);
    exclusive_scan(matrix->densecolptr,matrix->numtile+1);
    exclusive_scan(matrix->hyb_coocount, matrix->numtile +1);
    hybcoosize = matrix->hyb_coocount[matrix->numtile];
    exclusive_scan(new_coocount, matrix->numtile +1);
    /* NOTE(review): coocount is overwritten below with hybcoosize + coosize;
       this first assignment appears to be superseded — confirm. */
    matrix->coocount = new_coocount[ matrix->numtile];

    /* Accumulate total storage per format across all tiles. */
    for (int blki=0;blki<matrix->tilem;blki++)
    {
        int rowlength= blki==matrix->tilem-1 ? matrix->m-(matrix->tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        int rowbnum=matrix->tile_ptr[blki+1]-matrix->tile_ptr[blki];
        for (int bi=0;bi<rowbnum;bi++)
        {
            char format= matrix->Format[matrix->tile_ptr[blki]+bi];
            switch (format)
            {
                case 0: //csr
                    csrsize += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    break;
                case 1: //coo
                    coosize += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    break;
                case 2: //ell
                    ellsize += matrix->blknnz[matrix->tile_ptr[blki]+bi] ;
                    break;
                case 3: //hyb
                    hybsize += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    hybellsize += matrix->blkwidth[matrix->tile_ptr[blki]+bi] * rowlength;
                    break;
                case 4:
                    dense_size += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    break;
                case 5:
                    denserow_size += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    break;
                case 6:
                    densecol_size += matrix->blknnz[matrix->tile_ptr[blki]+bi];
                    break;
                default:
                    break;
            }
        }
    }
    exclusive_scan(matrix->blknnz,(matrix->numtile+1));

    /* Report how many tiles landed in each format. */
    int *formatnum = (int *)malloc(7 * sizeof(int));
    memset(formatnum,0,7 * sizeof(int));
    for (int j=0;j<7;j++)
    {
        for (int i=0;i<matrix->numtile;i++)
        {
            if (matrix->Format[i]==j)
            {
                formatnum[j]++;
            }
        }
    }
    for (int j=0;j<7;j++)
    {
        printf("format =%i,count =%i\n",j,formatnum[j]);
    }
    int csrtilecount = formatnum[0];

    /* Per-thread scratch sizing for step 4: maximum nnz and maximum tile
       count over all block rows. */
    int nnz_temp =0;
    int tile_count_temp =0;
    for (int blki =0;blki < matrix->tilem; blki ++)
    {
        int start= blki*BLOCK_SIZE;
        int end = blki==matrix->tilem-1 ? matrix->m : (blki+1)*BLOCK_SIZE ;
        nnz_temp = nnz_temp < matrix->rowpointer[end] - matrix->rowpointer[start] ? matrix->rowpointer[end] - matrix->rowpointer[start] : nnz_temp;
        tile_count_temp = tile_count_temp < matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] ? matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] : tile_count_temp;
    }

    //CSR
    matrix->Tile_csr_Val=(MAT_VAL_TYPE*)malloc((csrsize)*sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_csr_Val, 0, (csrsize)*sizeof(MAT_VAL_TYPE));
    matrix->Tile_csr_Col=(unsigned char*)malloc((csrsize)*sizeof(unsigned char));
    memset(matrix->Tile_csr_Col, 0, (csrsize)*sizeof(unsigned char));
    matrix->Tile_csr_Ptr=(unsigned char*)malloc((csrtilecount * BLOCK_SIZE)*sizeof(unsigned char));
    memset(matrix->Tile_csr_Ptr, 0, (csrtilecount * BLOCK_SIZE )*sizeof(unsigned char));
    //COO
    matrix->Tile_coo_Val=(MAT_VAL_TYPE*)malloc((coosize)*sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_coo_Val, 0, (coosize)*sizeof(MAT_VAL_TYPE));
    matrix->Tile_coo_colIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char));
    memset(matrix->Tile_coo_colIdx, 0, (coosize)*sizeof(unsigned char));
    matrix->Tile_coo_rowIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char));
    memset(matrix->Tile_coo_rowIdx, 0, (coosize)*sizeof(unsigned char));
    //ELL
    matrix->Tile_ell_Val=(MAT_VAL_TYPE*)malloc((ellsize)*sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_ell_Val,0,(ellsize)*sizeof(MAT_VAL_TYPE));
    matrix->Tile_ell_colIdx=(unsigned char*)malloc((ellsize)*sizeof(unsigned char));
    memset(matrix->Tile_ell_colIdx, 0, sizeof(unsigned char) * ellsize);
    //HYB
    matrix->Tile_hyb_Val=(MAT_VAL_TYPE*)malloc((hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_hyb_Val,0,(hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE));
    matrix->Tile_hyb_ellcolIdx=(unsigned char*)malloc((hybellsize+hybcoosize)*sizeof(unsigned char));
    matrix->Tile_hyb_coorowIdx=(unsigned char*)malloc((hybcoosize)*sizeof(unsigned char)) ;
    memset(matrix->Tile_hyb_ellcolIdx, 0, sizeof(unsigned char) * (hybellsize+hybcoosize));
    memset(matrix->Tile_hyb_coorowIdx, 0, sizeof(unsigned char) * hybcoosize);
    //dense
    matrix->Tile_dns_Val=(MAT_VAL_TYPE*)malloc((dense_size)*sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_dns_Val,0,dense_size * sizeof(MAT_VAL_TYPE));
    //dense row
    matrix->Tile_dnsrow_Val=(MAT_VAL_TYPE*)malloc((denserow_size) * sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_dnsrow_Val,0,denserow_size * sizeof(MAT_VAL_TYPE));
    matrix->Tile_dnsrow_idx = (char *)malloc(matrix->denserowptr[matrix->numtile] * sizeof(char));
    memset(matrix->Tile_dnsrow_idx, 0, matrix->denserowptr[matrix->numtile] * sizeof(char));
    //dense column
    matrix->Tile_dnscol_Val=(MAT_VAL_TYPE*)malloc((densecol_size) * sizeof(MAT_VAL_TYPE));
    memset(matrix->Tile_dnscol_Val, 0, densecol_size * sizeof(MAT_VAL_TYPE));
    matrix->Tile_dnscol_idx = (char *)malloc(matrix->densecolptr[matrix->numtile] * sizeof(char));
    memset(matrix->Tile_dnscol_idx, 0, matrix->densecolptr[matrix->numtile] * sizeof(char));

    //extract COO to a new matrix
    matrix->coocount = hybcoosize + coosize;
    *new_coo_value_temp = (MAT_VAL_TYPE*)malloc(matrix->coocount *sizeof(MAT_VAL_TYPE));
    memset(*new_coo_value_temp, 0, (matrix->coocount) *sizeof(MAT_VAL_TYPE));
    MAT_VAL_TYPE *new_coo_value = *new_coo_value_temp;
    *new_coo_rowidx_temp = (int *)malloc((hybcoosize+ coosize) *sizeof(int));
    memset(*new_coo_rowidx_temp, 0, (hybcoosize+coosize) *sizeof(int));
    int *new_coo_rowidx = *new_coo_rowidx_temp;
    *new_coo_colidx_temp = (int *)malloc((matrix->coocount) *sizeof(int));
    memset(*new_coo_colidx_temp, 0, (matrix->coocount) *sizeof(int));
    int *new_coo_colidx = *new_coo_colidx_temp;

    //mask
    matrix->mask = (unsigned short *)malloc(matrix->numtile * BLOCK_SIZE * sizeof(unsigned short));
    memset(matrix->mask, 0, matrix->numtile * BLOCK_SIZE * sizeof(unsigned short));

    gettimeofday(&t1, NULL);
    step4_kernel(matrix, matrix->csr_ptr, matrix->hyb_coocount, nnz_temp, tile_count_temp,
                 matrix->csr_offset, matrix->csrptr_offset, matrix->coo_offset, matrix->ell_offset, matrix->hyb_offset,
                 matrix->dns_offset, matrix->dnsrow_offset, matrix->dnscol_offset,
                 new_coo_value,new_coo_colidx, new_coo_rowidx, new_coocount);
    gettimeofday(&t2, NULL);
    double time_step4 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step4 runtime = %4.5f ms\n", time_step4);
}
#endif
#ifndef FORMAT_TRANS #define FORMAT_TRANS #include"common.h" #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" //calculate the number of non-empty tiles of matrix A void step1_kernel(Beidou_Tile_Matrix *matrix) // (int m, int n, MAT_PTR_TYPE *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *numtile) { int *rowpointer=matrix->rowpointer; int m = matrix->m; int n = matrix->n; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); printf("threads=%i\n",thread); char *flag_g=(char *)malloc(thread*tilen * sizeof(char)); for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); // printf("id =%d\n",thread_id); char *flag = flag_g+ thread_id * tilen; memset(flag,0,tilen * sizeof(char)); int start = blki *BLOCK_SIZE; int end = blki == tilem-1 ? m : (blki+1)* BLOCK_SIZE ; for (int j = rowpointer[start]; j < rowpointer[end]; j ++) { int jc = columnidx[j] / BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[blki]++; } } // free(flag); } free(flag_g); } void step2_kernel(Beidou_Tile_Matrix *matrix) // (int rowA, int coA, int *rowpointerA, int *columnindexA, // int tilem, int tilenA, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int numtileA) { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->columnidx; int colid =0; char *flag=(char *)malloc(tilen * sizeof(char)); for (int i=0;i<tilem;i++) { memset(flag,0,tilen*sizeof(char)); int start= i*BLOCK_SIZE; int end = i== tilem-1 ? 
m : (i+1)*BLOCK_SIZE ; for (int j=rowpointer[start];j< rowpointer[end];j++) { int jc=columnidx[j]/BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[i+1]++; tile_columnidx[colid]=jc; colid++; } } } for (int i=1;i<tilem+1;i++) { tile_ptr[i] += tile_ptr[i-1]; } } //determine the tile structure (tileptr , tile columnidx and tile_nnz) of matrix A. void step2_kernel_new (Beidou_Tile_Matrix *matrix, unsigned char *tile_csr_ptr) // (int m, int n, int *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int *tile_nnz, // unsigned char *tile_csr_ptr, int numtile) { int m = matrix->m; int n = matrix->n; int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = matrix->tile_nnz; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); char *col_temp_g=(char *)malloc((thread * tilen) * sizeof(char)); int *nnz_temp_g=(int *)malloc((thread * tilen) * sizeof(int)); unsigned char *ptr_per_tile_g = (unsigned char *)malloc((thread * tilen * BLOCK_SIZE) * sizeof(unsigned char)); for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); char *col_temp = col_temp_g + thread_id * tilen; memset(col_temp,0,tilen * sizeof(char)); int *nnz_temp = nnz_temp_g + thread_id * tilen; memset(nnz_temp,0,tilen * sizeof(int)); unsigned char *ptr_per_tile = ptr_per_tile_g + thread_id * tilen * BLOCK_SIZE; memset(ptr_per_tile, 0, tilen * BLOCK_SIZE * sizeof(unsigned char)); int pre_tile = tile_ptr[blki]; int rowlen = blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; int start= blki * BLOCK_SIZE; int end = blki==tilem-1 ? 
m : (blki +1)*BLOCK_SIZE ; for (int ri=0 ; ri < rowlen ; ri ++) { for (int j=rowpointer[start + ri];j<rowpointer[start + ri +1];j++) { int jc = columnidx[j]/BLOCK_SIZE; col_temp[jc] = 1; nnz_temp[jc] ++; ptr_per_tile[jc * BLOCK_SIZE + ri] ++; } } int count =0; for (int blkj=0 ;blkj < tilen; blkj++) { if (col_temp[blkj] == 1) { tile_columnidx[pre_tile + count] = blkj; tile_nnz[pre_tile + count] = nnz_temp[blkj]; for (int ri =0; ri < rowlen ; ri ++) { tile_csr_ptr[(pre_tile + count) * BLOCK_SIZE + ri] = ptr_per_tile[blkj * BLOCK_SIZE + ri]; } count ++; } } } free(col_temp_g); free(nnz_temp_g); free(ptr_per_tile_g); } void step3_kernel_new(Beidou_Tile_Matrix *matrix, int *new_coocount) { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = matrix->tile_nnz; char *Format = matrix->Format; int *blknnz = matrix->blknnz; char *blkwidth = matrix->blkwidth; int *denserowptr = matrix->denserowptr; int *densecolptr = matrix->densecolptr; unsigned char *tile_csr_ptr = matrix->csr_ptr; int *hyb_coocount = matrix->hyb_coocount; int *csr_offset = matrix->csr_offset; int *csrptr_offset = matrix->csrptr_offset; int *coo_offset = matrix->coo_offset; int *ell_offset = matrix->ell_offset; int *hyb_offset = matrix->hyb_offset; int *dns_offset = matrix->dns_offset; int *dnsrow_offset = matrix->dnsrow_offset; int *dnscol_offset = matrix->dnscol_offset; for (int blki=0;blki<tilem;blki++) { int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki]; int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; for (int bi=0;bi<tilenum_per_row;bi++) { int collen = tile_columnidx[tile_ptr[blki]+bi] == tilen-1 ? 
n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ; int tile_id = tile_ptr[blki]+bi; int tilennz = tile_nnz[tile_id +1] - tile_nnz[tile_id]; int nnzthreshold = rowlen * collen * 0.5 ; // if (1) // { // Format[tile_id] =0 ; // blknnz[tile_id] = tilennz ; // csr_offset[tile_id] = tilennz; // csrptr_offset[tile_id] = rowlen; // } if (tilennz >= nnzthreshold) //if the number of nnz is more than 128, then dense { Format[tile_id] = 4 ; blknnz[tile_id] = rowlen * collen; dns_offset[tile_id] = rowlen * collen; continue; } if (tilennz <= COO_THRESHOLD) //else if the number of nnz is less than 12, then coo { Format[tile_id] = 1 ; blknnz[tile_id] = tilennz; coo_offset[tile_id] = tilennz; new_coocount[tile_id] = tilennz; continue; } else if (tilennz % collen ==0 || tilennz % rowlen ==0) { int dnsrowflag =0 ; int numdnsrow =0; int dnscolflag =0; int numdnscol =0; for (int ri=0;ri < rowlen ;ri++) { if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] % collen !=0) { dnsrowflag =0; break; } else { if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] == collen) { dnsrowflag =1; numdnsrow ++ ; } } } if (dnsrowflag == 1) { Format[tile_id] = 5 ; //Dense Row denserowptr[tile_id] = numdnsrow ; blknnz[tile_id] = numdnsrow * collen; dnsrow_offset[tile_id] = numdnsrow * collen; continue; } else { int start = blki*BLOCK_SIZE; int end = blki==tilem-1 ? 
m : (blki+1)*BLOCK_SIZE ; int jc = tile_columnidx[tile_id]; unsigned char *dnscol_colidx_temp= (unsigned char *)malloc(tilennz * sizeof(unsigned char)); memset(dnscol_colidx_temp, -1, tilennz * sizeof(unsigned char)); // int k=0; unsigned char *col_flag =(unsigned char *)malloc(collen * sizeof(unsigned char)); memset(col_flag, 0, collen * sizeof(unsigned char)); for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj ++) { int jc_temp = columnidx[blkj]/BLOCK_SIZE; if (jc_temp == jc) { int col_temp = columnidx[blkj] - jc * BLOCK_SIZE; col_flag[col_temp] ++; // dnscol_colidx_temp[k]= columnindexA[blkj] - jc * BLOCK_SIZE; // if (tile_id == 389) // printf("colidx = %i\n", dnscol_colidx_temp[k]); // k++; } } for (int j =0; j < collen; j ++) { if (col_flag[j] % rowlen !=0) { dnscolflag =0; break; } else { if (col_flag[j] == rowlen) { dnscolflag =1; numdnscol ++ ; } } } if (dnscolflag == 1) { // printf("numdnscol = %i\n", numdnscol); Format[tile_id] = 6 ; //Dense Col densecolptr[tile_id] = numdnscol ; blknnz[tile_id] = numdnscol * rowlen; dnscol_offset[tile_id] = numdnscol * rowlen; continue; } // unsigned char *trans_ptr= (unsigned char *)malloc(collen * sizeof(unsigned char)); // memset(trans_ptr, 0, collen * sizeof(unsigned char)); // for (int ni =0; ni < tilennz; ni ++) // { // int coltemp = dnscol_colidx_temp[ni]; // trans_ptr[coltemp]++; // } // for (int ri=0;ri < rowlen ;ri++) // { // if (trans_ptr[ri] % rowlen !=0) // { // dnscolflag =0; // break; // } // else // { // if (trans_ptr[ri] == rowlen) // { // dnscolflag =1; // numdnscol ++ ; // } // } // } // if (dnscolflag == 1) // { // // printf("numdnscol = %i\n", numdnscol); // Format[tile_id] = 6 ; //Dense Col // densecolptr[tile_id] = numdnscol ; // blknnz[tile_id] = numdnscol * rowlen; // dnscol_offset[tile_id] = numdnscol * rowlen; // continue; // } } } if (Format[tile_id] != 5 && Format[tile_id] !=6) { int bwidth=0; for (int blkj=0;blkj<rowlen;blkj++) { if (bwidth < tile_csr_ptr[tile_id * BLOCK_SIZE + 
blkj] ) bwidth = tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] ; } double row_length_mean = ((double)tilennz) / rowlen; double variance = 0.0; double row_length_skewness = 0.0; for (int row = 0; row < rowlen; ++row) { int length = tile_csr_ptr[tile_id * BLOCK_SIZE + row]; double delta = (double)(length - row_length_mean); variance += (delta * delta); row_length_skewness += (delta * delta * delta); } variance /= rowlen; double row_length_std_dev = sqrt(variance); row_length_skewness = (row_length_skewness / rowlen) / pow(row_length_std_dev, 3.0); double row_length_variation = row_length_std_dev / row_length_mean; double ell_csr_threshold = 0.2; double csr_hyb_threshold = 1.0; if (row_length_variation <= ell_csr_threshold) // if variation is less than 0.2, then ELL { Format[tile_id] = 2; blkwidth[tile_id]=bwidth; blknnz[tile_id] = bwidth * rowlen ; ell_offset[tile_id] = bwidth * rowlen; } else { int hybwidth=bwidth; int iopriorsize= bwidth * rowlen * sizeof (MAT_VAL_TYPE) + bwidth * rowlen * sizeof (unsigned char) ; // bwidth * rowlen * sizeof (MAT_VAL_TYPE) + bwidth * rowlen * sizeof (char) /2 +1 ; int ionextsize; int coonextnum=0; int coopriornum=0; for (int wi=bwidth-1;wi>0;wi--) { coonextnum=0; for (int blkj=0;blkj<rowlen;blkj++) { if ( tile_csr_ptr[tile_id * BLOCK_SIZE + blkj]> wi) { coonextnum += tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] - wi ; } } ionextsize= wi * rowlen * sizeof (MAT_VAL_TYPE )+ wi * rowlen * sizeof (unsigned char) + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (unsigned char)) ; // wi * rowlen * sizeof (MAT_VAL_TYPE )+ wi * rowlen * sizeof (char) /2 + 1 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) ; if (iopriorsize<=ionextsize) { hybwidth=wi+1; break; } else { hybwidth = wi; iopriorsize=ionextsize; coopriornum=coonextnum; } } if (row_length_variation >= csr_hyb_threshold )//&& coopriornum <= 4) // if variation > 1.0, and the number of coo data <=4, then HYB { Format[tile_id] = 3; hyb_coocount[tile_id] = coopriornum; 
blkwidth[tile_id]=hybwidth; blknnz[tile_id] = coopriornum + hybwidth * rowlen ; hyb_offset[tile_id] = coopriornum + hybwidth * rowlen; new_coocount[tile_id] = coopriornum; } else //else CSR { Format[tile_id] =0 ; blknnz[tile_id] = tilennz ; csr_offset[tile_id] = tilennz; csrptr_offset[tile_id] = BLOCK_SIZE; } } } } } } void step4_kernel(Beidou_Tile_Matrix *matrix, unsigned char *csr_ptr, int *hyb_coocount, int nnz_temp, int tile_count_temp, int *csr_offset, int *csrptr_offset, int *coo_offset, int *ell_offset, int *hyb_offset, int *dns_offset, int *dnsrow_offset, int *dnscol_offset, MAT_VAL_TYPE *new_coo_value, int *new_coo_colidx, int *new_coo_rowidx, int *new_coocount) // (int m, int n, int *rowpointer, int *columnidx, MAT_VAL_TYPE *value, // int tilem, int tilen, int numtile, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int *tile_nnz, char *Format, // int *blknnz, unsigned char *csr_ptr, int nnz_temp, int tile_count_temp, // MAT_VAL_TYPE *Tile_csr_Val, unsigned char *Tile_csr_Col, unsigned char *Tile_csr_Ptr, int *csr_offset, int *csrptr_offset, // MAT_VAL_TYPE *Tile_coo_Val, unsigned char *Tile_coo_colIdx, unsigned char *Tile_coo_rowIdx, int *coo_offset, // MAT_VAL_TYPE *Tile_ell_Val, unsigned char *Tile_ell_colIdx, char *blkwidth, int *ell_offset, // MAT_VAL_TYPE *Tile_hyb_Val, unsigned char *Tile_hyb_ellcolIdx, unsigned char *Tile_hyb_coorowIdx, int *hyb_coocount, int *hyb_offset, // MAT_VAL_TYPE *Tile_dns_Val, int *dns_offset, // MAT_VAL_TYPE *Tile_dnsrow_Val, char *Tile_dnsrow_idx, int * denserowptr, int *dnsrow_offset, // MAT_VAL_TYPE *Tile_dnscol_Val, char *Tile_dnscol_idx, int *densecolptr, int *dnscol_offset){ { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; MAT_VAL_TYPE *value = matrix->value; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = 
 matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val;
    unsigned char *Tile_csr_Col = matrix->Tile_csr_Col;
    unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr;
    MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val;
    unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx;
    unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx;
    MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val;
    unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx;
    MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val;
    unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx;
    unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx;
    MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val;
    MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val;
    char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx;
    MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val;
    char *Tile_dnscol_idx = matrix->Tile_dnscol_idx;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned short *mask = matrix->mask;
    /* Per-thread scratch sized by the caller-computed maxima (nnz_temp =
     * max nnz per row stripe, tile_count_temp = max tiles per row stripe).
     * NOTE(review): without an enclosing parallel region
     * omp_get_thread_num() below is always 0. */
    unsigned thread = omp_get_max_threads();
    unsigned char *csr_colidx_temp_g=(unsigned char*)malloc((thread * nnz_temp )*sizeof(unsigned char));
    MAT_VAL_TYPE *csr_val_temp_g=(MAT_VAL_TYPE*)malloc((thread * nnz_temp)*sizeof(MAT_VAL_TYPE));
    int *tile_count_g = (int *)malloc(thread * tile_count_temp * sizeof(int));
    /* for each tile-row stripe */
    for (int blki=0;blki<tilem;blki++)
    {
        int thread_id = omp_get_thread_num();
        unsigned char *csr_colidx_temp = csr_colidx_temp_g + thread_id * nnz_temp;
        MAT_VAL_TYPE *csr_val_temp = csr_val_temp_g + thread_id * nnz_temp;
        int *tile_count = tile_count_g + thread_id * tile_count_temp;
        memset(csr_colidx_temp, 0, (nnz_temp)*sizeof(unsigned char));
        memset(csr_val_temp, 0, (nnz_temp)*sizeof(MAT_VAL_TYPE));
        memset(tile_count, 0, (tile_count_temp)*sizeof(int));
        int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki];
        int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        int start = blki*BLOCK_SIZE;
        int end = blki==tilem-1 ? m : (blki+1)*BLOCK_SIZE ;
        /* Scatter this stripe's nonzeros into per-tile CSR-ordered scratch:
         * csr_val_temp/csr_colidx_temp hold each tile's entries
         * contiguously, at offset pre_nnz relative to the stripe. */
        for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj ++)
        {
            int jc_temp = columnidx[blkj]/BLOCK_SIZE;
            for (int bi = 0; bi < tilenum_per_row; bi ++)
            {
                int tile_id = tile_ptr[blki]+bi;
                int jc = tile_columnidx[tile_id];
                int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
                if (jc == jc_temp)
                {
                    csr_val_temp[pre_nnz + tile_count[bi]] = value[blkj];
                    csr_colidx_temp[pre_nnz + tile_count[bi]] = columnidx[blkj] - jc * BLOCK_SIZE;
                    tile_count[bi] ++;
                    break;
                }
            }
        }
        /* Convert every tile of the stripe into its chosen format. */
        for (int bi = 0; bi < tilenum_per_row; bi ++)
        {
            int tile_id = tile_ptr[blki]+bi;
            int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
            int tilennz = tile_nnz[tile_id +1] - tile_nnz[tile_id];
            int collen = tile_columnidx[tile_id] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
            int format = Format[tile_id];
            switch (format)
            {
                case 0:   /* CSR */
                {
                    int offset = csr_offset[tile_id];
                    int ptr_offset = csrptr_offset[tile_id];
                    /* csr_ptr holds per-row counts; scan in place to get
                     * row start offsets */
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        int start = ptr_temp[ri];
                        int stop = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];;
                        for (int k =start; k < stop; k ++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz + k];
                            Tile_csr_Val[offset + k] = csr_val_temp[pre_nnz + k];
                            Tile_csr_Col[offset + k] = csr_colidx_temp[pre_nnz + k];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                        Tile_csr_Ptr[ptr_offset+ ri] = ptr_temp[ri];
                    }
                    break;
                }
                case 1:   /* COO: to global COO for SpMV, tile COO for SpGEMM */
                {
                    if(SPMV && !SPGEMM)
                    {
                        int colidx_temp = tile_columnidx[tile_id];
                        int offset_new = new_coocount[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                /* emit global (row, col, val) triples */
                                new_coo_rowidx[offset_new + j] = ri + blki * BLOCK_SIZE;
                                new_coo_value[offset_new + j] = csr_val_temp[pre_nnz + j] ;
                                new_coo_colidx[offset_new + j]=csr_colidx_temp[pre_nnz + j] + colidx_temp * BLOCK_SIZE;
                            }
                        }
                    }
                    if(SPGEMM && !SPMV)
                    {
                        int offset = coo_offset[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        exclusive_scan_char(ptr_temp, rowlen);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ ri +1];
                            for (int j = ptr_temp[ ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz + j];
                                Tile_coo_rowIdx[offset+ j] = ri;
                                Tile_coo_Val[offset + j] = csr_val_temp[pre_nnz + j] ;
                                Tile_coo_colIdx[offset + j]=csr_colidx_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 2:   /* ELL, column-major: slot*rowlen + row */
                {
                    int offset = ell_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            int temp = j - ptr_temp[ri];
                            Tile_ell_colIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_ell_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                    }
                    /* mask pass re-reads the whole ELL slab, padding slots
                     * included (inner bi shadows the outer tile loop's bi) */
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi ++)
                        {
                            int colidx = Tile_ell_colIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 3:   /* HYB: ELL part up to blkwidth, leftovers to COO */
                {
                    int colidx_temp = tile_columnidx[tile_id];
                    int offset = hyb_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int offset_new = new_coocount[tile_id];
                    int coocount=0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        /* first blkwidth entries of the row go to the ELL part */
                        int stop= (nnz_end- ptr_temp[ri]) <= blkwidth[tile_id] ? nnz_end : ptr_temp[ri] + blkwidth[tile_id] ;
                        for (int j = ptr_temp[ri]; j < stop; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            int temp = j - ptr_temp[ri];
                            Tile_hyb_ellcolIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_hyb_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                        if (SPGEMM && !SPMV)
                        {
                            /* overflow entries: COO appended after the ELL slab */
                            for (int k=stop; k< nnz_end; k++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz +k];
                                Tile_hyb_Val[offset + blkwidth[tile_id] * rowlen + coocount] = csr_val_temp[pre_nnz +k];
                                Tile_hyb_ellcolIdx[offset + blkwidth[tile_id] * rowlen + coocount] = csr_colidx_temp[pre_nnz +k];
                                Tile_hyb_coorowIdx[hyb_coocount[tile_id] + coocount] = ri;
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                                coocount++;
                            }
                        }
                        if(SPMV && !SPGEMM)
                        {
                            /* overflow entries: global COO triples */
                            for (int k=stop; k< nnz_end; k++)
                            {
                                new_coo_value[offset_new + coocount] = csr_val_temp[pre_nnz +k];
                                new_coo_colidx[offset_new+coocount] = csr_colidx_temp[pre_nnz +k] + colidx_temp * BLOCK_SIZE;
                                new_coo_rowidx[offset_new+coocount] = ri + blki * BLOCK_SIZE;
                                coocount++;
                            }
                        }
                    }
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi ++)
                        {
                            int colidx = Tile_hyb_ellcolIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 4:   /* dense, column-major: col*rowlen + row */
                {
                    int offset = dns_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                            Tile_dns_Val[offset + csr_colidx_temp[pre_nnz + j] * rowlen +ri] = csr_val_temp[pre_nnz + j];
                        }
                    }
                    /* dense tile: set every bit of every row's mask */
                    for (int ri =0; ri < rowlen; ri ++)
                    {
                        for(int j =0; j < BLOCK_SIZE; j ++)
                        {
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - j - 1));
                        }
                    }
                    break;
                }
                case 5:   /* dense rows: store only the full rows */
                {
                    int offset = dnsrow_offset[tile_id];
                    int rowoffset = denserowptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int dnsriid=0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        if (nnz_end - ptr_temp[ri] == collen)   /* row is full */
                        {
                            Tile_dnsrow_idx[rowoffset + dnsriid]=ri;
                            dnsriid ++;
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                                Tile_dnsrow_Val[offset + j] = csr_val_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 6:   /* dense columns, stored column-major */
                {
                    int offset = dnscol_offset[tile_id];
                    int coloffset = densecolptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    /* the first row holds one entry per dense column, so its
                     * column indices identify the dense columns */
                    int dnsciid=0;
                    for (int j=ptr_temp[0];j < ptr_temp[1];j ++)
                    {
                        int ci = csr_colidx_temp[pre_nnz + j];
                        Tile_dnscol_idx[coloffset + dnsciid] =ci ;
                        dnsciid++;
                    }
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen -1 ? tilennz : ptr_temp[ri +1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            int temp = j - ptr_temp[ri];
                            unsigned char colidx = csr_colidx_temp[pre_nnz +j];
                            Tile_dnscol_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz +j];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                default:
                    break;
            }
        }
    }
    free(csr_colidx_temp_g);
    free(csr_val_temp_g);
    free(tile_count_g);
}

/* Tile_destroy: free every array owned by the tile matrix.  Most pointers
 * are reset to NULL after freeing; NOTE(review): the scan/offset arrays at
 * the end are freed without being NULLed — confirm nothing touches them
 * afterwards. */
void Tile_destroy(Beidou_Tile_Matrix *matrix)
{
    free(matrix->Tile_csr_Col);
    matrix->Tile_csr_Col = NULL;
    free(matrix->Tile_csr_Ptr);
    matrix->Tile_csr_Ptr = NULL;
    free(matrix->Tile_csr_Val);
    matrix->Tile_csr_Val = NULL;
    free(matrix->Tile_coo_colIdx);
    matrix->Tile_coo_colIdx = NULL;
    free(matrix->Tile_coo_rowIdx);
    matrix->Tile_coo_rowIdx = NULL;
    free(matrix->Tile_coo_Val);
    matrix->Tile_coo_Val = NULL;
    free(matrix->Tile_ell_colIdx);
    matrix->Tile_ell_colIdx = NULL;
    free(matrix->Tile_ell_Val);
    matrix->Tile_ell_Val = NULL;
    free(matrix->Tile_hyb_coorowIdx);
    matrix->Tile_hyb_coorowIdx = NULL;
    free(matrix->Tile_hyb_ellcolIdx);
    matrix->Tile_hyb_ellcolIdx = NULL;
    free(matrix->Tile_hyb_Val);
    matrix->Tile_hyb_Val = NULL;
    free(matrix->Tile_dns_Val);
    matrix->Tile_dns_Val = NULL;
    free(matrix->Tile_dnsrow_idx);
    matrix->Tile_dnsrow_idx = NULL;
    free(matrix->Tile_dnsrow_Val);
    matrix->Tile_dnsrow_Val = NULL;
    free(matrix->Tile_dnscol_Val);
    matrix->Tile_dnscol_Val = NULL;
    free(matrix->Tile_dnscol_idx);
    matrix->Tile_dnscol_idx = NULL;
    free(matrix->densecolptr);
    matrix->densecolptr = NULL;
    free(matrix->denserowptr);
    matrix->denserowptr = NULL;
    free(matrix->blkwidth);
    matrix->blkwidth = NULL;
    free(matrix->tile_ptr);
    matrix->tile_ptr = NULL;
    free(matrix->tile_columnidx);
    matrix->tile_columnidx = NULL;
    free(matrix->tile_nnz);
    matrix->tile_nnz = NULL;
    free(matrix->blknnz);
    matrix->blknnz = NULL;
    free(matrix->value);
    matrix->value = NULL;
    free(matrix->columnidx);
    matrix->columnidx = NULL;
    free(matrix->coo_new_matrix_ptr);
    matrix->coo_new_matrix_ptr = NULL;
    free(matrix->coo_new_rowidx);
    matrix->coo_new_rowidx = NULL;
    free(matrix->coo_new_matrix_value);
    matrix->coo_new_matrix_value = NULL;
    free(matrix->coo_new_matrix_colidx);
    matrix->coo_new_matrix_colidx = NULL;
    free(matrix->csr_ptr);
    free(matrix->csr_offset);
    free(matrix->csrptr_offset);
    free(matrix->coo_offset);
    free(matrix->ell_offset);
    free(matrix->hyb_offset);
    free(matrix->dns_offset);
    free(matrix->dnsrow_offset);
    free(matrix->dnscol_offset);
}

/* format_transform: driver that runs step1..step4, allocating every tile
 * array in between and timing each phase. */
void format_transform(Beidou_Tile_Matrix *matrix, MAT_VAL_TYPE **new_coo_value_temp, int **new_coo_colidx_temp, int **new_coo_rowidx_temp, int **new_coocount_temp)
{
    struct timeval t1, t2;
    gettimeofday(&t1, NULL);
    step1_kernel(matrix);
    gettimeofday(&t2, NULL);
    double time_step1 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step1 runtime = %4.5f ms\n", time_step1);
    /* per-row tile counts -> tile row pointers */
    exclusive_scan(matrix->tile_ptr, matrix->tilem+1);
    matrix->numtile = matrix->tile_ptr[matrix->tilem];
    printf("the number of tiles in matrix A= %d\n",matrix->numtile);
    matrix->tile_columnidx=(int *)malloc(matrix->numtile*sizeof(int));
    memset(matrix->tile_columnidx, 0, matrix->numtile*sizeof(int));
    matrix->tile_nnz = (int *)malloc((matrix->numtile + 1)* sizeof(int)); //real nnz of each 
sparse tile memset(matrix->tile_nnz,0,(matrix->numtile + 1) * sizeof(int)); matrix->csr_ptr = (unsigned char *)malloc((matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char)); memset (matrix->csr_ptr, 0, (matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char)); gettimeofday(&t1, NULL); // step2_kernel_new(matrixA->m, matrixA->n, matrixA->rowpointer, matrixA->columnidx, // matrixA->tilem, matrixA->tilen, matrixA->tile_ptr, matrixA->tile_columnidx, matrixA->tile_nnz, csr_ptr, matrixA->numtile); step2_kernel_new(matrix, matrix->csr_ptr); gettimeofday(&t2, NULL); double time_step2 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("transform_step2 runtime = %4.5f ms\n", time_step2); exclusive_scan(matrix->tile_nnz, matrix->numtile +1); //format 0-7 represent 7 formats: CSR, COO, ELL, HYB, Dns, DnsRow, DnsCol matrix->Format =(char *)malloc(matrix->numtile* sizeof(char)); memset(matrix->Format,0,matrix->numtile * sizeof(char)); matrix->blknnz = (int *)malloc((matrix->numtile + 1)* sizeof(int)); //space cost that need allocate memset(matrix->blknnz,0,(matrix->numtile + 1) * sizeof(int)); //dense int dense_size=0; matrix->dns_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->dns_offset, 0, (matrix->numtile+1) * sizeof(int)); //denserow matrix->denserowptr = (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(matrix->denserowptr,0,(matrix->numtile+ 1) * sizeof(int)); int denserow_size =0 ; matrix->dnsrow_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->dnsrow_offset, 0, (matrix->numtile+1) * sizeof(int)); //densecolumn matrix->densecolptr = (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(matrix->densecolptr,0,(matrix->numtile+ 1) * sizeof(int)); int densecol_size =0 ; matrix->dnscol_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->dnscol_offset, 0, (matrix->numtile+1) * sizeof(int)); //CSR int csrsize=0; // int csrptrlen=0; matrix->csr_offset = (int 
*)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->csr_offset, 0, (matrix->numtile+1) * sizeof(int)); matrix->csrptr_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->csrptr_offset, 0, (matrix->numtile+1) * sizeof(int)); //ELL int ellsize =0; matrix->ell_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->ell_offset, 0, (matrix->numtile+1) * sizeof(int)); //COO int coosize =0; matrix->coo_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->coo_offset, 0, (matrix->numtile+1) * sizeof(int)); //HYB int hybellsize =0; int hybcoosize =0; int hybsize =0; matrix->blkwidth = (char *)malloc(matrix->numtile*sizeof(char)); memset(matrix->blkwidth,0,matrix->numtile * sizeof(char)) ; matrix->hyb_coocount= (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(matrix->hyb_coocount,0,(matrix->numtile + 1) * sizeof(int)) ; matrix->hyb_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->hyb_offset, 0, (matrix->numtile+1) * sizeof(int)); *new_coocount_temp = (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(*new_coocount_temp,0,(matrix->numtile + 1) * sizeof(int)) ; int *new_coocount = *new_coocount_temp; gettimeofday(&t1, NULL); // step3_kernel_new(matrixA->m, matrixA->n, matrixA->rowpointer, matrixA->columnidx, // matrixA->tilem, matrixA->tilen, matrixA->numtile, matrixA->tile_ptr, matrixA->tile_columnidx, matrixA->tile_nnz, matrixA->Format, // csr_ptr, matrixA->blknnz, matrixA->blkwidth, hyb_coocount, // matrixA->denserowptr, matrixA->densecolptr, // csr_offset, csrptr_offset, coo_offset, ell_offset, hyb_offset, dns_offset, dnsrow_offset, dnscol_offset); step3_kernel_new(matrix, new_coocount); gettimeofday(&t2, NULL); double time_step3 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("transform_step3 runtime = %4.5f ms\n", time_step3); exclusive_scan(matrix->csr_offset, matrix->numtile +1); exclusive_scan(matrix->csrptr_offset, 
matrix->numtile +1); exclusive_scan(matrix->coo_offset, matrix->numtile +1); exclusive_scan(matrix->ell_offset, matrix->numtile +1); exclusive_scan(matrix->hyb_offset, matrix->numtile +1); exclusive_scan(matrix->dns_offset, matrix->numtile +1); exclusive_scan(matrix->dnsrow_offset, matrix->numtile +1); exclusive_scan(matrix->dnscol_offset, matrix->numtile +1); exclusive_scan(matrix->denserowptr,matrix->numtile+1); exclusive_scan(matrix->densecolptr,matrix->numtile+1); exclusive_scan(matrix->hyb_coocount, matrix->numtile +1); hybcoosize = matrix->hyb_coocount[matrix->numtile]; exclusive_scan(new_coocount, matrix->numtile +1); matrix->coocount = new_coocount[ matrix->numtile]; for (int blki=0;blki<matrix->tilem;blki++) { int rowlength= blki==matrix->tilem-1 ? matrix->m-(matrix->tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; int rowbnum=matrix->tile_ptr[blki+1]-matrix->tile_ptr[blki]; for (int bi=0;bi<rowbnum;bi++) { char format= matrix->Format[matrix->tile_ptr[blki]+bi]; switch (format) { case 0: //csr csrsize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; // csrptrlen += rowlength ; break; case 1: //coo coosize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 2: //ell ellsize += matrix->blknnz[matrix->tile_ptr[blki]+bi] ; break; case 3: //hyb hybsize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; hybellsize += matrix->blkwidth[matrix->tile_ptr[blki]+bi] * rowlength; break; case 4: dense_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 5: denserow_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 6: densecol_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; default: break; } } } exclusive_scan(matrix->blknnz,(matrix->numtile+1)); int *formatnum = (int *)malloc(7 * sizeof(int)); memset(formatnum,0,7 * sizeof(int)); for (int j=0;j<7;j++) { for (int i=0;i<matrix->numtile;i++) { if (matrix->Format[i]==j) { formatnum[j]++; // printf("%d ",Format[i]); // break ; } } } for (int j=0;j<7;j++) { printf("format =%i,count =%i\n",j,formatnum[j]); } 
int csrtilecount = formatnum[0]; int nnz_temp =0; int tile_count_temp =0; for (int blki =0;blki < matrix->tilem; blki ++) { int start= blki*BLOCK_SIZE; int end = blki==matrix->tilem-1 ? matrix->m : (blki+1)*BLOCK_SIZE ; nnz_temp = nnz_temp < matrix->rowpointer[end] - matrix->rowpointer[start] ? matrix->rowpointer[end] - matrix->rowpointer[start] : nnz_temp; tile_count_temp = tile_count_temp < matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] ? matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] : tile_count_temp; } //CSR matrix->Tile_csr_Val=(MAT_VAL_TYPE*)malloc((csrsize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_csr_Val, 0, (csrsize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_csr_Col=(unsigned char*)malloc((csrsize)*sizeof(unsigned char)); memset(matrix->Tile_csr_Col, 0, (csrsize)*sizeof(unsigned char)); matrix->Tile_csr_Ptr=(unsigned char*)malloc((csrtilecount * BLOCK_SIZE)*sizeof(unsigned char)); memset(matrix->Tile_csr_Ptr, 0, (csrtilecount * BLOCK_SIZE )*sizeof(unsigned char)); //COO matrix->Tile_coo_Val=(MAT_VAL_TYPE*)malloc((coosize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_coo_Val, 0, (coosize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_coo_colIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char)); memset(matrix->Tile_coo_colIdx, 0, (coosize)*sizeof(unsigned char)); matrix->Tile_coo_rowIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char)); memset(matrix->Tile_coo_rowIdx, 0, (coosize)*sizeof(unsigned char)); //ELL matrix->Tile_ell_Val=(MAT_VAL_TYPE*)malloc((ellsize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_ell_Val,0,(ellsize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_ell_colIdx=(unsigned char*)malloc((ellsize)*sizeof(unsigned char)); memset(matrix->Tile_ell_colIdx, 0, sizeof(unsigned char) * ellsize); //HYB matrix->Tile_hyb_Val=(MAT_VAL_TYPE*)malloc((hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_hyb_Val,0,(hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_hyb_ellcolIdx=(unsigned char*)malloc((hybellsize+hybcoosize)*sizeof(unsigned 
char)); matrix->Tile_hyb_coorowIdx=(unsigned char*)malloc((hybcoosize)*sizeof(unsigned char)) ; memset(matrix->Tile_hyb_ellcolIdx, 0, sizeof(unsigned char) * (hybellsize+hybcoosize)); memset(matrix->Tile_hyb_coorowIdx, 0, sizeof(unsigned char) * hybcoosize); //dense matrix->Tile_dns_Val=(MAT_VAL_TYPE*)malloc((dense_size)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dns_Val,0,dense_size * sizeof(MAT_VAL_TYPE)); //dense row matrix->Tile_dnsrow_Val=(MAT_VAL_TYPE*)malloc((denserow_size) * sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dnsrow_Val,0,denserow_size * sizeof(MAT_VAL_TYPE)); matrix->Tile_dnsrow_idx = (char *)malloc(matrix->denserowptr[matrix->numtile] * sizeof(char)); memset(matrix->Tile_dnsrow_idx, 0, matrix->denserowptr[matrix->numtile] * sizeof(char)); //dense column matrix->Tile_dnscol_Val=(MAT_VAL_TYPE*)malloc((densecol_size) * sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dnscol_Val, 0, densecol_size * sizeof(MAT_VAL_TYPE)); matrix->Tile_dnscol_idx = (char *)malloc(matrix->densecolptr[matrix->numtile] * sizeof(char)); memset(matrix->Tile_dnscol_idx, 0, matrix->densecolptr[matrix->numtile] * sizeof(char)); //extract COO to a new matrix matrix->coocount = hybcoosize + coosize; *new_coo_value_temp = (MAT_VAL_TYPE*)malloc(matrix->coocount *sizeof(MAT_VAL_TYPE)); memset(*new_coo_value_temp, 0, (matrix->coocount) *sizeof(MAT_VAL_TYPE)); MAT_VAL_TYPE *new_coo_value = *new_coo_value_temp; *new_coo_rowidx_temp = (int *)malloc((hybcoosize+ coosize) *sizeof(int)); memset(*new_coo_rowidx_temp, 0, (hybcoosize+coosize) *sizeof(int)); int *new_coo_rowidx = *new_coo_rowidx_temp; *new_coo_colidx_temp = (int *)malloc((matrix->coocount) *sizeof(int)); memset(*new_coo_colidx_temp, 0, (matrix->coocount) *sizeof(int)); int *new_coo_colidx = *new_coo_colidx_temp; //mask matrix->mask = (unsigned short *)malloc(matrix->numtile * BLOCK_SIZE * sizeof(unsigned short)); memset(matrix->mask, 0, matrix->numtile * BLOCK_SIZE * sizeof(unsigned short)); gettimeofday(&t1, NULL); // 
step4_kernel(matrixA->m, matrixA->n, matrixA->rowpointer, matrixA->columnidx, matrixA->value, // matrixA->tilem, matrixA->tilen, matrixA->numtile, matrixA->tile_ptr, matrixA->tile_columnidx, matrixA->tile_nnz, matrixA->Format, // matrixA->blknnz, csr_ptr, nnz_temp, tile_count_temp, // matrixA->Tile_csr_Val, matrixA->Tile_csr_Col, matrixA->Tile_csr_Ptr, csr_offset, csrptr_offset, // matrixA->Tile_coo_Val, matrixA->Tile_coo_colIdx, matrixA->Tile_coo_rowIdx, coo_offset, // matrixA->Tile_ell_Val, matrixA->Tile_ell_colIdx, matrixA->blkwidth, ell_offset, // matrixA->Tile_hyb_Val, matrixA->Tile_hyb_ellcolIdx, matrixA->Tile_hyb_coorowIdx, hyb_coocount, hyb_offset, // matrixA->Tile_dns_Val, dns_offset, // matrixA->Tile_dnsrow_Val, matrixA->Tile_dnsrow_idx, matrixA->denserowptr, dnsrow_offset, // matrixA->Tile_dnscol_Val, matrixA->Tile_dnscol_idx, matrixA->densecolptr, dnscol_offset); step4_kernel(matrix, matrix->csr_ptr, matrix->hyb_coocount, nnz_temp, tile_count_temp, matrix->csr_offset, matrix->csrptr_offset, matrix->coo_offset, matrix->ell_offset, matrix->hyb_offset, matrix->dns_offset, matrix->dnsrow_offset, matrix->dnscol_offset, new_coo_value,new_coo_colidx, new_coo_rowidx, new_coocount); gettimeofday(&t2, NULL); double time_step4 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("transform_step4 runtime = %4.5f ms\n", time_step4); } #endif
#ifndef FORMAT_TRANS #define FORMAT_TRANS #include"common.h" #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" //calculate the number of non-empty tiles of matrix A void step1_kernel(Beidou_Tile_Matrix *matrix) // (int m, int n, MAT_PTR_TYPE *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *numtile) { int *rowpointer=matrix->rowpointer; int m = matrix->m; int n = matrix->n; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); printf("threads=%i\n",thread); char *flag_g=(char *)malloc(thread*tilen * sizeof(char)); #pragma omp parallel for for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); // printf("id =%d\n",thread_id); char *flag = flag_g+ thread_id * tilen; memset(flag,0,tilen * sizeof(char)); int start = blki *BLOCK_SIZE; int end = blki == tilem-1 ? m : (blki+1)* BLOCK_SIZE ; for (int j = rowpointer[start]; j < rowpointer[end]; j ++) { int jc = columnidx[j] / BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[blki]++; } } // free(flag); } free(flag_g); } void step2_kernel(Beidou_Tile_Matrix *matrix) // (int rowA, int coA, int *rowpointerA, int *columnindexA, // int tilem, int tilenA, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int numtileA) { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->columnidx; int colid =0; char *flag=(char *)malloc(tilen * sizeof(char)); for (int i=0;i<tilem;i++) { memset(flag,0,tilen*sizeof(char)); int start= i*BLOCK_SIZE; int end = i== tilem-1 ? 
m : (i+1)*BLOCK_SIZE ; for (int j=rowpointer[start];j< rowpointer[end];j++) { int jc=columnidx[j]/BLOCK_SIZE; if (flag[jc]==0) { flag[jc]=1; tile_ptr[i+1]++; tile_columnidx[colid]=jc; colid++; } } } for (int i=1;i<tilem+1;i++) { tile_ptr[i] += tile_ptr[i-1]; } } //determine the tile structure (tileptr , tile columnidx and tile_nnz) of matrix A. void step2_kernel_new (Beidou_Tile_Matrix *matrix, unsigned char *tile_csr_ptr) // (int m, int n, int *rowpointer, int *columnidx, // int tilem, int tilen, MAT_PTR_TYPE *tile_ptr, int *tile_columnidx, int *tile_nnz, // unsigned char *tile_csr_ptr, int numtile) { int m = matrix->m; int n = matrix->n; int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = matrix->tile_nnz; int numtile = matrix->numtile; unsigned thread = omp_get_max_threads(); char *col_temp_g=(char *)malloc((thread * tilen) * sizeof(char)); int *nnz_temp_g=(int *)malloc((thread * tilen) * sizeof(int)); unsigned char *ptr_per_tile_g = (unsigned char *)malloc((thread * tilen * BLOCK_SIZE) * sizeof(unsigned char)); #pragma omp parallel for for (int blki = 0; blki < tilem; blki ++) { int thread_id = omp_get_thread_num(); char *col_temp = col_temp_g + thread_id * tilen; memset(col_temp,0,tilen * sizeof(char)); int *nnz_temp = nnz_temp_g + thread_id * tilen; memset(nnz_temp,0,tilen * sizeof(int)); unsigned char *ptr_per_tile = ptr_per_tile_g + thread_id * tilen * BLOCK_SIZE; memset(ptr_per_tile, 0, tilen * BLOCK_SIZE * sizeof(unsigned char)); int pre_tile = tile_ptr[blki]; int rowlen = blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; int start= blki * BLOCK_SIZE; int end = blki==tilem-1 ? 
m : (blki + 1) * BLOCK_SIZE;
        // Pass 1: scan this stripe's nonzeros, accumulating per tile column:
        // occupancy flag, nonzero count, and per-row counts.
        for (int ri = 0; ri < rowlen; ri++)
        {
            for (int j = rowpointer[start + ri]; j < rowpointer[start + ri + 1]; j++)
            {
                int jc = columnidx[j] / BLOCK_SIZE;
                col_temp[jc] = 1;
                nnz_temp[jc]++;
                ptr_per_tile[jc * BLOCK_SIZE + ri]++;
            }
        }
        // Pass 2: compact the occupied tile columns into the global tile arrays.
        int count = 0;
        for (int blkj = 0; blkj < tilen; blkj++)
        {
            if (col_temp[blkj] == 1)
            {
                tile_columnidx[pre_tile + count] = blkj;
                tile_nnz[pre_tile + count] = nnz_temp[blkj];
                for (int ri = 0; ri < rowlen; ri++)
                {
                    // Per-row nonzero counts; turned into tile-local CSR row
                    // pointers later (exclusive_scan_char in step4).
                    tile_csr_ptr[(pre_tile + count) * BLOCK_SIZE + ri] = ptr_per_tile[blkj * BLOCK_SIZE + ri];
                }
                count++;
            }
        }
    }
    free(col_temp_g);
    free(nnz_temp_g);
    free(ptr_per_tile_g);
}

// Choose a storage format for every tile (0 CSR, 1 COO, 2 ELL, 3 HYB, 4 Dense,
// 5 DenseRow, 6 DenseCol) and record each format's per-tile space requirement
// in the various *_offset arrays (the caller exclusive-scans them afterwards).
void step3_kernel_new(Beidou_Tile_Matrix *matrix, int *new_coocount)
{
    int *rowpointer = matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz = matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned char *tile_csr_ptr = matrix->csr_ptr;
    int *hyb_coocount = matrix->hyb_coocount;
    int *csr_offset = matrix->csr_offset;
    int *csrptr_offset = matrix->csrptr_offset;
    int *coo_offset = matrix->coo_offset;
    int *ell_offset = matrix->ell_offset;
    int *hyb_offset = matrix->hyb_offset;
    int *dns_offset = matrix->dns_offset;
    int *dnsrow_offset = matrix->dnsrow_offset;
    int *dnscol_offset = matrix->dnscol_offset;

#pragma omp parallel for
    for (int blki = 0; blki < tilem; blki++)
    {
        int tilenum_per_row = tile_ptr[blki + 1] - tile_ptr[blki];
        int rowlen = blki == tilem - 1 ? m - (tilem - 1) * BLOCK_SIZE : BLOCK_SIZE;
        for (int bi = 0; bi < tilenum_per_row; bi++)
        {
            // Last tile column may be narrower than BLOCK_SIZE.
            int collen = tile_columnidx[tile_ptr[blki] + bi] == tilen - 1 ?
n - (tilen - 1) * BLOCK_SIZE : BLOCK_SIZE;
            int tile_id = tile_ptr[blki] + bi;
            int tilennz = tile_nnz[tile_id + 1] - tile_nnz[tile_id];
            // Density threshold: half of the tile's slots (truncated to int).
            int nnzthreshold = rowlen * collen * 0.5;
            // if (1)
            // {
            //     Format[tile_id] =0 ;
            //     blknnz[tile_id] = tilennz ;
            //     csr_offset[tile_id] = tilennz;
            //     csrptr_offset[tile_id] = rowlen;
            // }
            if (tilennz >= nnzthreshold) //if the number of nnz is more than 128, then dense
            {
                Format[tile_id] = 4;
                blknnz[tile_id] = rowlen * collen;
                dns_offset[tile_id] = rowlen * collen;
                continue;
            }
            if (tilennz <= COO_THRESHOLD) //else if the number of nnz is less than 12, then coo
            {
                Format[tile_id] = 1;
                blknnz[tile_id] = tilennz;
                coo_offset[tile_id] = tilennz;
                // COO tiles contribute all their nonzeros to the extracted
                // global COO matrix (SPMV path).
                new_coocount[tile_id] = tilennz;
                continue;
            }
            else if (tilennz % collen == 0 || tilennz % rowlen == 0)
            {
                // nnz divisible by a tile dimension: the tile may consist of
                // completely full rows (dense-row) or full columns (dense-col).
                // Check rows first via the per-row counts from step 2.
                int dnsrowflag = 0;
                int numdnsrow = 0;
                int dnscolflag = 0;
                int numdnscol = 0;
                for (int ri = 0; ri < rowlen; ri++)
                {
                    if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] % collen != 0)
                    {
                        // A row that is neither empty nor full: not dense-row.
                        dnsrowflag = 0;
                        break;
                    }
                    else
                    {
                        if (tile_csr_ptr[tile_id * BLOCK_SIZE + ri] == collen)
                        {
                            dnsrowflag = 1;
                            numdnsrow++;
                        }
                    }
                }
                if (dnsrowflag == 1)
                {
                    Format[tile_id] = 5; //Dense Row
                    denserowptr[tile_id] = numdnsrow;
                    blknnz[tile_id] = numdnsrow * collen;
                    dnsrow_offset[tile_id] = numdnsrow * collen;
                    continue;
                }
                else
                {
                    // Dense-col test needs per-column counts, which requires
                    // rescanning this stripe of the CSR input.
                    int start = blki * BLOCK_SIZE;
                    int end = blki == tilem - 1 ?
m : (blki+1)*BLOCK_SIZE ; int jc = tile_columnidx[tile_id]; unsigned char *dnscol_colidx_temp= (unsigned char *)malloc(tilennz * sizeof(unsigned char)); memset(dnscol_colidx_temp, -1, tilennz * sizeof(unsigned char)); // int k=0; unsigned char *col_flag =(unsigned char *)malloc(collen * sizeof(unsigned char)); memset(col_flag, 0, collen * sizeof(unsigned char)); for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj ++) { int jc_temp = columnidx[blkj]/BLOCK_SIZE; if (jc_temp == jc) { int col_temp = columnidx[blkj] - jc * BLOCK_SIZE; col_flag[col_temp] ++; // dnscol_colidx_temp[k]= columnindexA[blkj] - jc * BLOCK_SIZE; // if (tile_id == 389) // printf("colidx = %i\n", dnscol_colidx_temp[k]); // k++; } } for (int j =0; j < collen; j ++) { if (col_flag[j] % rowlen !=0) { dnscolflag =0; break; } else { if (col_flag[j] == rowlen) { dnscolflag =1; numdnscol ++ ; } } } if (dnscolflag == 1) { // printf("numdnscol = %i\n", numdnscol); Format[tile_id] = 6 ; //Dense Col densecolptr[tile_id] = numdnscol ; blknnz[tile_id] = numdnscol * rowlen; dnscol_offset[tile_id] = numdnscol * rowlen; continue; } // unsigned char *trans_ptr= (unsigned char *)malloc(collen * sizeof(unsigned char)); // memset(trans_ptr, 0, collen * sizeof(unsigned char)); // for (int ni =0; ni < tilennz; ni ++) // { // int coltemp = dnscol_colidx_temp[ni]; // trans_ptr[coltemp]++; // } // for (int ri=0;ri < rowlen ;ri++) // { // if (trans_ptr[ri] % rowlen !=0) // { // dnscolflag =0; // break; // } // else // { // if (trans_ptr[ri] == rowlen) // { // dnscolflag =1; // numdnscol ++ ; // } // } // } // if (dnscolflag == 1) // { // // printf("numdnscol = %i\n", numdnscol); // Format[tile_id] = 6 ; //Dense Col // densecolptr[tile_id] = numdnscol ; // blknnz[tile_id] = numdnscol * rowlen; // dnscol_offset[tile_id] = numdnscol * rowlen; // continue; // } } } if (Format[tile_id] != 5 && Format[tile_id] !=6) { int bwidth=0; for (int blkj=0;blkj<rowlen;blkj++) { if (bwidth < tile_csr_ptr[tile_id * BLOCK_SIZE + 
blkj])
                        bwidth = tile_csr_ptr[tile_id * BLOCK_SIZE + blkj];
                }
                // Row-length statistics: mean, variance, skewness and the
                // coefficient of variation that drives the format choice.
                double row_length_mean = ((double)tilennz) / rowlen;
                double variance = 0.0;
                double row_length_skewness = 0.0;
                for (int row = 0; row < rowlen; ++row)
                {
                    int length = tile_csr_ptr[tile_id * BLOCK_SIZE + row];
                    double delta = (double)(length - row_length_mean);
                    variance += (delta * delta);
                    row_length_skewness += (delta * delta * delta);
                }
                variance /= rowlen;
                double row_length_std_dev = sqrt(variance);
                row_length_skewness = (row_length_skewness / rowlen) / pow(row_length_std_dev, 3.0);
                double row_length_variation = row_length_std_dev / row_length_mean;
                double ell_csr_threshold = 0.2;
                double csr_hyb_threshold = 1.0;
                if (row_length_variation <= ell_csr_threshold) // if variation is less than 0.2, then ELL
                {
                    Format[tile_id] = 2;
                    blkwidth[tile_id] = bwidth;
                    blknnz[tile_id] = bwidth * rowlen;
                    ell_offset[tile_id] = bwidth * rowlen;
                }
                else
                {
                    // HYB width search: shrink the ELL width while the combined
                    // ELL+COO byte cost keeps decreasing; rows longer than the
                    // width spill their excess nonzeros into the COO part.
                    int hybwidth = bwidth;
                    int iopriorsize = bwidth * rowlen * sizeof(MAT_VAL_TYPE) + bwidth * rowlen * sizeof(unsigned char);
                    // bwidth * rowlen * sizeof (MAT_VAL_TYPE) + bwidth * rowlen * sizeof (char) /2 +1 ;
                    int ionextsize;
                    int coonextnum = 0;
                    int coopriornum = 0;
                    for (int wi = bwidth - 1; wi > 0; wi--)
                    {
                        // Count nonzeros that would overflow width wi.
                        coonextnum = 0;
                        for (int blkj = 0; blkj < rowlen; blkj++)
                        {
                            if (tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] > wi)
                            {
                                coonextnum += tile_csr_ptr[tile_id * BLOCK_SIZE + blkj] - wi;
                            }
                        }
                        ionextsize = wi * rowlen * sizeof(MAT_VAL_TYPE) + wi * rowlen * sizeof(unsigned char) + coonextnum * (sizeof(MAT_VAL_TYPE) + sizeof(unsigned char));
                        // wi * rowlen * sizeof (MAT_VAL_TYPE )+ wi * rowlen * sizeof (char) /2 + 1 + coonextnum * (sizeof (MAT_VAL_TYPE) + sizeof (char)) ;
                        if (iopriorsize <= ionextsize)
                        {
                            // Cost went back up: previous width was optimal.
                            hybwidth = wi + 1;
                            break;
                        }
                        else
                        {
                            hybwidth = wi;
                            iopriorsize = ionextsize;
                            coopriornum = coonextnum;
                        }
                    }
                    if (row_length_variation >= csr_hyb_threshold) //&& coopriornum <= 4) // if variation > 1.0, and the number of coo data <=4, then HYB
                    {
                        Format[tile_id] = 3;
                        hyb_coocount[tile_id] = coopriornum;
blkwidth[tile_id] = hybwidth;
                        blknnz[tile_id] = coopriornum + hybwidth * rowlen;
                        hyb_offset[tile_id] = coopriornum + hybwidth * rowlen;
                        // The spilled nonzeros also go to the extracted global
                        // COO matrix on the SPMV path.
                        new_coocount[tile_id] = coopriornum;
                    }
                    else //else CSR
                    {
                        Format[tile_id] = 0;
                        blknnz[tile_id] = tilennz;
                        csr_offset[tile_id] = tilennz;
                        csrptr_offset[tile_id] = BLOCK_SIZE;
                    }
                }
            }
        }
    }
}

// Fill the per-format value/index arrays of every tile, using the offsets
// computed in step 3 (after the caller's exclusive scans). Also extracts the
// COO portion into new_coo_* (SPMV path) and builds the per-row 16-bit
// occupancy masks (mask is unsigned short, so BLOCK_SIZE is presumably 16 -
// NOTE(review): confirm against common.h).
void step4_kernel(Beidou_Tile_Matrix *matrix, unsigned char *csr_ptr, int *hyb_coocount, int nnz_temp, int tile_count_temp,
                  int *csr_offset, int *csrptr_offset, int *coo_offset, int *ell_offset, int *hyb_offset,
                  int *dns_offset, int *dnsrow_offset, int *dnscol_offset,
                  MAT_VAL_TYPE *new_coo_value, int *new_coo_colidx, int *new_coo_rowidx, int *new_coocount)
// (old flat-parameter signature kept by the original author as a reference;
//  all of those arguments now travel inside 'matrix')
{
    int *rowpointer = matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    MAT_VAL_TYPE *value = matrix->value;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz =
matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val;
    unsigned char *Tile_csr_Col = matrix->Tile_csr_Col;
    unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr;
    MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val;
    unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx;
    unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx;
    MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val;
    unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx;
    MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val;
    unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx;
    unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx;
    MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val;
    MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val;
    char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx;
    MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val;
    char *Tile_dnscol_idx = matrix->Tile_dnscol_idx;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned short *mask = matrix->mask;

    unsigned thread = omp_get_max_threads();
    // Per-thread scratch: one stripe's nonzeros regrouped tile by tile.
    // nnz_temp / tile_count_temp are the caller-computed maxima over stripes.
    unsigned char *csr_colidx_temp_g = (unsigned char *)malloc((thread * nnz_temp) * sizeof(unsigned char));
    MAT_VAL_TYPE *csr_val_temp_g = (MAT_VAL_TYPE *)malloc((thread * nnz_temp) * sizeof(MAT_VAL_TYPE));
    int *tile_count_g = (int *)malloc(thread * tile_count_temp * sizeof(int));
    //for each tile
#pragma omp parallel for
    for (int blki = 0; blki < tilem; blki++)
    {
        int thread_id = omp_get_thread_num();
        unsigned char *csr_colidx_temp = csr_colidx_temp_g + thread_id * nnz_temp;
        MAT_VAL_TYPE *csr_val_temp = csr_val_temp_g + thread_id * nnz_temp;
        int *tile_count = tile_count_g + thread_id * tile_count_temp;
        // unsigned char *csr_colidx_temp = (unsigned char *)malloc((nnz_temp )*sizeof(unsigned char));
        // MAT_VAL_TYPE *csr_val_temp = (MAT_VAL_TYPE *)malloc((nnz_temp)*sizeof(MAT_VAL_TYPE));
        // int *tile_count = (int *)malloc(tile_count_temp * sizeof(int));
        memset(csr_colidx_temp, 0, (nnz_temp) * sizeof(unsigned char));
        memset(csr_val_temp, 0, (nnz_temp) * sizeof(MAT_VAL_TYPE));
        memset(tile_count, 0, (tile_count_temp) * sizeof(int));
        int tilenum_per_row = tile_ptr[blki + 1] - tile_ptr[blki];
        int rowlen = blki == tilem - 1 ? m - (tilem - 1) * BLOCK_SIZE : BLOCK_SIZE;
        int start = blki * BLOCK_SIZE;
        int end = blki == tilem - 1 ? m : (blki + 1) * BLOCK_SIZE;
        // Gather pass: distribute this stripe's nonzeros into per-tile
        // sub-ranges of the scratch buffers (row-major order is preserved,
        // so each sub-range is already a tile-local CSR).
        for (int blkj = rowpointer[start]; blkj < rowpointer[end]; blkj++)
        {
            int jc_temp = columnidx[blkj] / BLOCK_SIZE;
            // Linear search for the tile whose tile-column matches; the number
            // of tiles per stripe is small, so this stays cheap.
            for (int bi = 0; bi < tilenum_per_row; bi++)
            {
                int tile_id = tile_ptr[blki] + bi;
                int jc = tile_columnidx[tile_id];
                // Offset of this tile's sub-range within the stripe scratch.
                int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
                if (jc == jc_temp)
                {
                    csr_val_temp[pre_nnz + tile_count[bi]] = value[blkj];
                    csr_colidx_temp[pre_nnz + tile_count[bi]] = columnidx[blkj] - jc * BLOCK_SIZE;
                    tile_count[bi]++;
                    break;
                }
            }
        }
        // Scatter pass: emit each tile in its chosen format.
        for (int bi = 0; bi < tilenum_per_row; bi++)
        {
            int tile_id = tile_ptr[blki] + bi;
            int pre_nnz = tile_nnz[tile_id] - tile_nnz[tile_ptr[blki]];
            int tilennz = tile_nnz[tile_id + 1] - tile_nnz[tile_id]; //blknnz[tile_id+1] - blknnz[tile_id] ;
            int collen = tile_columnidx[tile_id] == tilen - 1 ?
n - (tilen - 1) * BLOCK_SIZE : BLOCK_SIZE;
            int format = Format[tile_id];
            switch (format)
            {
                case 0: // CSR tile
                {
                    int offset = csr_offset[tile_id];
                    int ptr_offset = csrptr_offset[tile_id];
                    // Turn this tile's per-row counts into row pointers
                    // (in place, in matrix->csr_ptr).
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int start = ptr_temp[ri];
                        int stop = ri == rowlen - 1 ? tilennz : ptr_temp[ri + 1];;
                        for (int k = start; k < stop; k++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz + k];
                            Tile_csr_Val[offset + k] = csr_val_temp[pre_nnz + k];
                            Tile_csr_Col[offset + k] = csr_colidx_temp[pre_nnz + k];
                            // Set the bit of this column in the row's mask.
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                        Tile_csr_Ptr[ptr_offset + ri] = ptr_temp[ri];
                    }
                    break;
                }
                case 1: // COO tile
                {
                    if (SPMV && !SPGEMM)
                    {
                        // printf("do spmv operation\n");
                        // SPMV path: COO tiles are extracted into the global
                        // COO matrix with absolute row/column indices.
                        int colidx_temp = tile_columnidx[tile_id];
                        int offset_new = new_coocount[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen - 1 ?
tilennz : ptr_temp[ri + 1];
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                // Global (matrix-level) coordinates.
                                new_coo_rowidx[offset_new + j] = ri + blki * BLOCK_SIZE;
                                new_coo_value[offset_new + j] = csr_val_temp[pre_nnz + j];
                                new_coo_colidx[offset_new + j] = csr_colidx_temp[pre_nnz + j] + colidx_temp * BLOCK_SIZE;
                            }
                        }
                    }
                    if (SPGEMM && !SPMV)
                    {
                        // printf("do spgemm operation\n");
                        // SpGEMM path: keep the COO tile in tile-local form.
                        int offset = coo_offset[tile_id];
                        unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                        exclusive_scan_char(ptr_temp, rowlen);
                        for (int ri = 0; ri < rowlen; ri++)
                        {
                            int nnz_end = ri == rowlen - 1 ? tilennz : ptr_temp[ri + 1];
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz + j];
                                Tile_coo_rowIdx[offset + j] = ri;
                                Tile_coo_Val[offset + j] = csr_val_temp[pre_nnz + j];
                                Tile_coo_colIdx[offset + j] = csr_colidx_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 2: // ELL tile: column-major, blkwidth slots per row
                {
                    int offset = ell_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen - 1 ?
tilennz : ptr_temp[ri + 1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            // temp = slot index of this nonzero within its row.
                            int temp = j - ptr_temp[ri];
                            Tile_ell_colIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_ell_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                            // mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    // Mask built from every ELL slot, including padding slots
                    // (their colidx defaults to 0).
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi++)
                        {
                            int colidx = Tile_ell_colIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 3: // HYB tile: ELL part of width blkwidth + COO overflow
                {
                    int colidx_temp = tile_columnidx[tile_id];
                    int offset = hyb_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int offset_new = new_coocount[tile_id];
                    int coocount = 0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen - 1 ? tilennz : ptr_temp[ri + 1];
                        // First blkwidth nonzeros of the row go to the ELL part.
                        int stop = (nnz_end - ptr_temp[ri]) <= blkwidth[tile_id] ? nnz_end : ptr_temp[ri] + blkwidth[tile_id];
                        for (int j = ptr_temp[ri]; j < stop; j++)
                        {
                            int colidx = csr_colidx_temp[pre_nnz + j];
                            int temp = j - ptr_temp[ri];
                            Tile_hyb_ellcolIdx[offset + temp * rowlen + ri] = csr_colidx_temp[pre_nnz + j];
                            Tile_hyb_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                        if (SPGEMM && !SPMV)
                        {
                            // Overflow nonzeros appended after the ELL region.
                            for (int k = stop; k < nnz_end; k++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz + k];
                                Tile_hyb_Val[offset + blkwidth[tile_id] * rowlen + coocount] = csr_val_temp[pre_nnz + k];
                                Tile_hyb_ellcolIdx[offset + blkwidth[tile_id] * rowlen + coocount] = csr_colidx_temp[pre_nnz + k];
                                Tile_hyb_coorowIdx[hyb_coocount[tile_id] + coocount] = ri;
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                                coocount++;
                            }
                        }
                        if (SPMV && !SPGEMM)
                        {
                            // SPMV path: overflow goes to the global COO matrix.
                            for (int
k = stop; k < nnz_end; k++)
                            {
                                new_coo_value[offset_new + coocount] = csr_val_temp[pre_nnz + k];
                                new_coo_colidx[offset_new + coocount] = csr_colidx_temp[pre_nnz + k] + colidx_temp * BLOCK_SIZE;
                                new_coo_rowidx[offset_new + coocount] = ri + blki * BLOCK_SIZE;
                                coocount++;
                            }
                        }
                    }
                    // Mask from the ELL slots (padding slots included).
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        for (int bi = 0; bi < blkwidth[tile_id]; bi++)
                        {
                            int colidx = Tile_hyb_ellcolIdx[offset + bi * rowlen + ri];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                case 4: // Dense tile: column-major rowlen x collen array
                {
                    int offset = dns_offset[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen - 1 ? tilennz : ptr_temp[ri + 1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            unsigned char colidx = csr_colidx_temp[pre_nnz + j];
                            Tile_dns_Val[offset + csr_colidx_temp[pre_nnz + j] * rowlen + ri] = csr_val_temp[pre_nnz + j];
                        }
                    }
                    // Dense tiles are treated as fully occupied: every bit set.
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        for (int j = 0; j < BLOCK_SIZE; j++)
                        {
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - j - 1));
                        }
                    }
                    break;
                }
                case 5: // Dense-row tile: only the completely full rows stored
                {
                    int offset = dnsrow_offset[tile_id];
                    int rowoffset = denserowptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    int dnsriid = 0;
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen - 1 ?
tilennz : ptr_temp[ri + 1];
                        if (nnz_end - ptr_temp[ri] == collen)
                        {
                            // This row is completely full: record its index and
                            // copy its values contiguously.
                            Tile_dnsrow_idx[rowoffset + dnsriid] = ri;
                            dnsriid++;
                            for (int j = ptr_temp[ri]; j < nnz_end; j++)
                            {
                                unsigned char colidx = csr_colidx_temp[pre_nnz + j];
                                Tile_dnsrow_Val[offset + j] = csr_val_temp[pre_nnz + j];
                                mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                            }
                        }
                    }
                    break;
                }
                case 6: // Dense-col tile: only the completely full columns stored
                {
                    int offset = dnscol_offset[tile_id];
                    int coloffset = densecolptr[tile_id];
                    unsigned char *ptr_temp = csr_ptr + tile_id * BLOCK_SIZE;
                    exclusive_scan_char(ptr_temp, BLOCK_SIZE);
                    // The first row of a dense-col tile touches exactly the full
                    // columns, so its column indices enumerate them.
                    int dnsciid = 0;
                    for (int j = ptr_temp[0]; j < ptr_temp[1]; j++)
                    {
                        int ci = csr_colidx_temp[pre_nnz + j];
                        // int ci = subrowmatrixA[bi].columnindex[j] ;
                        Tile_dnscol_idx[coloffset + dnsciid] = ci;
                        dnsciid++;
                    }
                    for (int ri = 0; ri < rowlen; ri++)
                    {
                        int nnz_end = ri == rowlen - 1 ?
tilennz : ptr_temp[ri + 1];
                        for (int j = ptr_temp[ri]; j < nnz_end; j++)
                        {
                            // temp = position of this column among the stored
                            // full columns (values are column-major).
                            int temp = j - ptr_temp[ri];
                            unsigned char colidx = csr_colidx_temp[pre_nnz + j]; //temp;
                            Tile_dnscol_Val[offset + temp * rowlen + ri] = csr_val_temp[pre_nnz + j];
                            mask[tile_id * BLOCK_SIZE + ri] |= (0x1 << (BLOCK_SIZE - colidx - 1));
                        }
                    }
                    break;
                }
                default:
                    break;
            }
        }
    }
    free(csr_colidx_temp_g);
    free(csr_val_temp_g);
    free(tile_count_g);
}

// Release every tile-format array owned by the matrix. Pointers are NULLed
// where the original author did so; the offset arrays freed on the next chunk
// are left dangling by the original code.
void Tile_destroy(Beidou_Tile_Matrix *matrix)
{
    free(matrix->Tile_csr_Col);
    matrix->Tile_csr_Col = NULL;
    free(matrix->Tile_csr_Ptr);
    matrix->Tile_csr_Ptr = NULL;
    free(matrix->Tile_csr_Val);
    matrix->Tile_csr_Val = NULL;
    free(matrix->Tile_coo_colIdx);
    matrix->Tile_coo_colIdx = NULL;
    free(matrix->Tile_coo_rowIdx);
    matrix->Tile_coo_rowIdx = NULL;
    free(matrix->Tile_coo_Val);
    matrix->Tile_coo_Val = NULL;
    free(matrix->Tile_ell_colIdx);
    matrix->Tile_ell_colIdx = NULL;
    free(matrix->Tile_ell_Val);
    matrix->Tile_ell_Val = NULL;
    free(matrix->Tile_hyb_coorowIdx);
    matrix->Tile_hyb_coorowIdx = NULL;
    free(matrix->Tile_hyb_ellcolIdx);
    matrix->Tile_hyb_ellcolIdx = NULL;
    free(matrix->Tile_hyb_Val);
    matrix->Tile_hyb_Val = NULL;
    free(matrix->Tile_dns_Val);
    matrix->Tile_dns_Val = NULL;
    free(matrix->Tile_dnsrow_idx);
    matrix->Tile_dnsrow_idx = NULL;
    free(matrix->Tile_dnsrow_Val);
    matrix->Tile_dnsrow_Val = NULL;
    free(matrix->Tile_dnscol_Val);
    matrix->Tile_dnscol_Val = NULL;
    free(matrix->Tile_dnscol_idx);
    matrix->Tile_dnscol_idx = NULL;
    free(matrix->densecolptr);
    matrix->densecolptr = NULL;
    free(matrix->denserowptr);
    matrix->denserowptr = NULL;
    free(matrix->blkwidth);
    matrix->blkwidth = NULL;
    free(matrix->tile_ptr);
    matrix->tile_ptr = NULL;
    free(matrix->tile_columnidx);
    matrix->tile_columnidx = NULL;
    free(matrix->tile_nnz);
    matrix->tile_nnz = NULL;
    free(matrix->blknnz);
    matrix->blknnz = NULL;
    free(matrix->value);
    matrix->value = NULL;
    free(matrix->columnidx);
    matrix->columnidx = NULL;
    free(matrix->coo_new_matrix_ptr);
    matrix->coo_new_matrix_ptr = NULL;
    free(matrix->coo_new_rowidx);
    matrix->coo_new_rowidx = NULL;
    free(matrix->coo_new_matrix_value);
    matrix->coo_new_matrix_value = NULL;
    free(matrix->coo_new_matrix_colidx);
    matrix->coo_new_matrix_colidx = NULL;
    // NOTE(review): these pointers are freed but not NULLed, unlike the ones
    // above - a later double Tile_destroy would double-free them.
    free(matrix->csr_ptr);
    free(matrix->csr_offset);
    free(matrix->csrptr_offset);
    free(matrix->coo_offset);
    free(matrix->ell_offset);
    free(matrix->hyb_offset);
    free(matrix->dns_offset);
    free(matrix->dnsrow_offset);
    free(matrix->dnscol_offset);
}

// Driver: run the four transformation steps on 'matrix' and hand the extracted
// global COO arrays (values / column indices / row indices / per-tile counts)
// back to the caller through the **_temp out-parameters. The caller owns and
// frees those buffers.
void format_transform(Beidou_Tile_Matrix *matrix, MAT_VAL_TYPE **new_coo_value_temp, int **new_coo_colidx_temp, int **new_coo_rowidx_temp, int **new_coocount_temp)
{
    // (the per-field offset aliases previously used here were folded into the
    //  step kernels, which now read them straight from 'matrix')
    struct timeval t1, t2;

    // Step 1: count non-empty tiles per tile-row (tile_ptr holds raw counts).
    gettimeofday(&t1, NULL);
    step1_kernel(matrix);
    gettimeofday(&t2, NULL);
    double time_step1 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step1 runtime = %4.5f ms\n", time_step1);

    // Scan the counts into offsets; total tile count lands in the last slot.
    exclusive_scan(matrix->tile_ptr, matrix->tilem + 1);
    matrix->numtile = matrix->tile_ptr[matrix->tilem];
    printf("the number of tiles in matrix A= %d\n", matrix->numtile);

    matrix->tile_columnidx = (int *)malloc(matrix->numtile * sizeof(int));
    memset(matrix->tile_columnidx, 0, matrix->numtile * sizeof(int));
    matrix->tile_nnz = (int *)malloc((matrix->numtile + 1) * sizeof(int)); //real nnz of each sparse tile
    memset(matrix->tile_nnz, 0, (matrix->numtile + 1) * sizeof(int));
    matrix->csr_ptr = (unsigned char *)malloc((matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char));
    memset (matrix->csr_ptr, 0, (matrix->numtile * BLOCK_SIZE) * sizeof(unsigned char));

    // Step 2: per-tile column indices, nnz counts, per-row counts.
    gettimeofday(&t1, NULL);
    step2_kernel_new(matrix, matrix->csr_ptr);
    gettimeofday(&t2, NULL);
    double time_step2 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("transform_step2 runtime = %4.5f ms\n", time_step2);

    exclusive_scan(matrix->tile_nnz, matrix->numtile + 1);

    //format 0-7 represent 7 formats: CSR, COO, ELL, HYB, Dns, DnsRow, DnsCol
    matrix->Format = (char *)malloc(matrix->numtile * sizeof(char));
    memset(matrix->Format, 0, matrix->numtile * sizeof(char));
    matrix->blknnz = (int *)malloc((matrix->numtile + 1) * sizeof(int)); //space cost that need allocate
    memset(matrix->blknnz, 0, (matrix->numtile + 1) * sizeof(int));
    //dense
    int dense_size = 0;
    matrix->dns_offset = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->dns_offset, 0, (matrix->numtile + 1) * sizeof(int));
    //denserow
    matrix->denserowptr = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->denserowptr, 0, (matrix->numtile + 1) * sizeof(int));
    int denserow_size = 0;
    matrix->dnsrow_offset = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->dnsrow_offset, 0, (matrix->numtile + 1) * sizeof(int));
    //densecolumn
    matrix->densecolptr = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->densecolptr, 0, (matrix->numtile + 1) * sizeof(int));
    int densecol_size = 0;
    matrix->dnscol_offset = (int *)malloc((matrix->numtile + 1) * sizeof(int));
    memset(matrix->dnscol_offset, 0, (matrix->numtile + 1) * sizeof(int));
    //CSR
    int csrsize = 0;
    // int csrptrlen=0;
    matrix->csr_offset = (int
*)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->csr_offset, 0, (matrix->numtile+1) * sizeof(int)); matrix->csrptr_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->csrptr_offset, 0, (matrix->numtile+1) * sizeof(int)); //ELL int ellsize =0; matrix->ell_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->ell_offset, 0, (matrix->numtile+1) * sizeof(int)); //COO int coosize =0; matrix->coo_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->coo_offset, 0, (matrix->numtile+1) * sizeof(int)); //HYB int hybellsize =0; int hybcoosize =0; int hybsize =0; matrix->blkwidth = (char *)malloc(matrix->numtile*sizeof(char)); memset(matrix->blkwidth,0,matrix->numtile * sizeof(char)) ; matrix->hyb_coocount= (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(matrix->hyb_coocount,0,(matrix->numtile + 1) * sizeof(int)) ; matrix->hyb_offset = (int *)malloc((matrix->numtile+1) * sizeof(int)); memset(matrix->hyb_offset, 0, (matrix->numtile+1) * sizeof(int)); *new_coocount_temp = (int *)malloc((matrix->numtile + 1) * sizeof(int)); memset(*new_coocount_temp,0,(matrix->numtile + 1) * sizeof(int)) ; int *new_coocount = *new_coocount_temp; gettimeofday(&t1, NULL); // step3_kernel_new(matrixA->m, matrixA->n, matrixA->rowpointer, matrixA->columnidx, // matrixA->tilem, matrixA->tilen, matrixA->numtile, matrixA->tile_ptr, matrixA->tile_columnidx, matrixA->tile_nnz, matrixA->Format, // csr_ptr, matrixA->blknnz, matrixA->blkwidth, hyb_coocount, // matrixA->denserowptr, matrixA->densecolptr, // csr_offset, csrptr_offset, coo_offset, ell_offset, hyb_offset, dns_offset, dnsrow_offset, dnscol_offset); step3_kernel_new(matrix, new_coocount); gettimeofday(&t2, NULL); double time_step3 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("transform_step3 runtime = %4.5f ms\n", time_step3); exclusive_scan(matrix->csr_offset, matrix->numtile +1); exclusive_scan(matrix->csrptr_offset, 
matrix->numtile +1); exclusive_scan(matrix->coo_offset, matrix->numtile +1); exclusive_scan(matrix->ell_offset, matrix->numtile +1); exclusive_scan(matrix->hyb_offset, matrix->numtile +1); exclusive_scan(matrix->dns_offset, matrix->numtile +1); exclusive_scan(matrix->dnsrow_offset, matrix->numtile +1); exclusive_scan(matrix->dnscol_offset, matrix->numtile +1); exclusive_scan(matrix->denserowptr,matrix->numtile+1); exclusive_scan(matrix->densecolptr,matrix->numtile+1); exclusive_scan(matrix->hyb_coocount, matrix->numtile +1); hybcoosize = matrix->hyb_coocount[matrix->numtile]; exclusive_scan(new_coocount, matrix->numtile +1); matrix->coocount = new_coocount[ matrix->numtile]; for (int blki=0;blki<matrix->tilem;blki++) { int rowlength= blki==matrix->tilem-1 ? matrix->m-(matrix->tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; int rowbnum=matrix->tile_ptr[blki+1]-matrix->tile_ptr[blki]; for (int bi=0;bi<rowbnum;bi++) { char format= matrix->Format[matrix->tile_ptr[blki]+bi]; switch (format) { case 0: //csr csrsize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; // csrptrlen += rowlength ; break; case 1: //coo coosize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 2: //ell ellsize += matrix->blknnz[matrix->tile_ptr[blki]+bi] ; break; case 3: //hyb hybsize += matrix->blknnz[matrix->tile_ptr[blki]+bi]; hybellsize += matrix->blkwidth[matrix->tile_ptr[blki]+bi] * rowlength; break; case 4: dense_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 5: denserow_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; case 6: densecol_size += matrix->blknnz[matrix->tile_ptr[blki]+bi]; break; default: break; } } } exclusive_scan(matrix->blknnz,(matrix->numtile+1)); int *formatnum = (int *)malloc(7 * sizeof(int)); memset(formatnum,0,7 * sizeof(int)); for (int j=0;j<7;j++) { for (int i=0;i<matrix->numtile;i++) { if (matrix->Format[i]==j) { formatnum[j]++; // printf("%d ",Format[i]); // break ; } } } for (int j=0;j<7;j++) { printf("format =%i,count =%i\n",j,formatnum[j]); } 
int csrtilecount = formatnum[0]; int nnz_temp =0; int tile_count_temp =0; for (int blki =0;blki < matrix->tilem; blki ++) { int start= blki*BLOCK_SIZE; int end = blki==matrix->tilem-1 ? matrix->m : (blki+1)*BLOCK_SIZE ; nnz_temp = nnz_temp < matrix->rowpointer[end] - matrix->rowpointer[start] ? matrix->rowpointer[end] - matrix->rowpointer[start] : nnz_temp; tile_count_temp = tile_count_temp < matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] ? matrix->tile_ptr[blki +1] - matrix->tile_ptr[blki] : tile_count_temp; } //CSR matrix->Tile_csr_Val=(MAT_VAL_TYPE*)malloc((csrsize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_csr_Val, 0, (csrsize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_csr_Col=(unsigned char*)malloc((csrsize)*sizeof(unsigned char)); memset(matrix->Tile_csr_Col, 0, (csrsize)*sizeof(unsigned char)); matrix->Tile_csr_Ptr=(unsigned char*)malloc((csrtilecount * BLOCK_SIZE)*sizeof(unsigned char)); memset(matrix->Tile_csr_Ptr, 0, (csrtilecount * BLOCK_SIZE )*sizeof(unsigned char)); //COO matrix->Tile_coo_Val=(MAT_VAL_TYPE*)malloc((coosize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_coo_Val, 0, (coosize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_coo_colIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char)); memset(matrix->Tile_coo_colIdx, 0, (coosize)*sizeof(unsigned char)); matrix->Tile_coo_rowIdx=(unsigned char*)malloc((coosize)*sizeof(unsigned char)); memset(matrix->Tile_coo_rowIdx, 0, (coosize)*sizeof(unsigned char)); //ELL matrix->Tile_ell_Val=(MAT_VAL_TYPE*)malloc((ellsize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_ell_Val,0,(ellsize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_ell_colIdx=(unsigned char*)malloc((ellsize)*sizeof(unsigned char)); memset(matrix->Tile_ell_colIdx, 0, sizeof(unsigned char) * ellsize); //HYB matrix->Tile_hyb_Val=(MAT_VAL_TYPE*)malloc((hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_hyb_Val,0,(hybellsize+hybcoosize)*sizeof(MAT_VAL_TYPE)); matrix->Tile_hyb_ellcolIdx=(unsigned char*)malloc((hybellsize+hybcoosize)*sizeof(unsigned 
char)); matrix->Tile_hyb_coorowIdx=(unsigned char*)malloc((hybcoosize)*sizeof(unsigned char)) ; memset(matrix->Tile_hyb_ellcolIdx, 0, sizeof(unsigned char) * (hybellsize+hybcoosize)); memset(matrix->Tile_hyb_coorowIdx, 0, sizeof(unsigned char) * hybcoosize); //dense matrix->Tile_dns_Val=(MAT_VAL_TYPE*)malloc((dense_size)*sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dns_Val,0,dense_size * sizeof(MAT_VAL_TYPE)); //dense row matrix->Tile_dnsrow_Val=(MAT_VAL_TYPE*)malloc((denserow_size) * sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dnsrow_Val,0,denserow_size * sizeof(MAT_VAL_TYPE)); matrix->Tile_dnsrow_idx = (char *)malloc(matrix->denserowptr[matrix->numtile] * sizeof(char)); memset(matrix->Tile_dnsrow_idx, 0, matrix->denserowptr[matrix->numtile] * sizeof(char)); //dense column matrix->Tile_dnscol_Val=(MAT_VAL_TYPE*)malloc((densecol_size) * sizeof(MAT_VAL_TYPE)); memset(matrix->Tile_dnscol_Val, 0, densecol_size * sizeof(MAT_VAL_TYPE)); matrix->Tile_dnscol_idx = (char *)malloc(matrix->densecolptr[matrix->numtile] * sizeof(char)); memset(matrix->Tile_dnscol_idx, 0, matrix->densecolptr[matrix->numtile] * sizeof(char)); //extract COO to a new matrix matrix->coocount = hybcoosize + coosize; *new_coo_value_temp = (MAT_VAL_TYPE*)malloc(matrix->coocount *sizeof(MAT_VAL_TYPE)); memset(*new_coo_value_temp, 0, (matrix->coocount) *sizeof(MAT_VAL_TYPE)); MAT_VAL_TYPE *new_coo_value = *new_coo_value_temp; *new_coo_rowidx_temp = (int *)malloc((hybcoosize+ coosize) *sizeof(int)); memset(*new_coo_rowidx_temp, 0, (hybcoosize+coosize) *sizeof(int)); int *new_coo_rowidx = *new_coo_rowidx_temp; *new_coo_colidx_temp = (int *)malloc((matrix->coocount) *sizeof(int)); memset(*new_coo_colidx_temp, 0, (matrix->coocount) *sizeof(int)); int *new_coo_colidx = *new_coo_colidx_temp; //mask matrix->mask = (unsigned short *)malloc(matrix->numtile * BLOCK_SIZE * sizeof(unsigned short)); memset(matrix->mask, 0, matrix->numtile * BLOCK_SIZE * sizeof(unsigned short)); gettimeofday(&t1, NULL); // 
step4_kernel(matrixA->m, matrixA->n, matrixA->rowpointer, matrixA->columnidx, matrixA->value, // matrixA->tilem, matrixA->tilen, matrixA->numtile, matrixA->tile_ptr, matrixA->tile_columnidx, matrixA->tile_nnz, matrixA->Format, // matrixA->blknnz, csr_ptr, nnz_temp, tile_count_temp, // matrixA->Tile_csr_Val, matrixA->Tile_csr_Col, matrixA->Tile_csr_Ptr, csr_offset, csrptr_offset, // matrixA->Tile_coo_Val, matrixA->Tile_coo_colIdx, matrixA->Tile_coo_rowIdx, coo_offset, // matrixA->Tile_ell_Val, matrixA->Tile_ell_colIdx, matrixA->blkwidth, ell_offset, // matrixA->Tile_hyb_Val, matrixA->Tile_hyb_ellcolIdx, matrixA->Tile_hyb_coorowIdx, hyb_coocount, hyb_offset, // matrixA->Tile_dns_Val, dns_offset, // matrixA->Tile_dnsrow_Val, matrixA->Tile_dnsrow_idx, matrixA->denserowptr, dnsrow_offset, // matrixA->Tile_dnscol_Val, matrixA->Tile_dnscol_idx, matrixA->densecolptr, dnscol_offset); step4_kernel(matrix, matrix->csr_ptr, matrix->hyb_coocount, nnz_temp, tile_count_temp, matrix->csr_offset, matrix->csrptr_offset, matrix->coo_offset, matrix->ell_offset, matrix->hyb_offset, matrix->dns_offset, matrix->dnsrow_offset, matrix->dnscol_offset, new_coo_value,new_coo_colidx, new_coo_rowidx, new_coocount); gettimeofday(&t2, NULL); double time_step4 = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("transform_step4 runtime = %4.5f ms\n", time_step4); } #endif
GB_unop__identity_uint64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint8) // op(A') function: GB (_unop_tran__identity_uint64_uint8) // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint8) ( uint64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma 
omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint8) // op(A') function: GB (_unop_tran__identity_uint64_uint8) // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint8) ( uint64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p 
= 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint8) // op(A') function: GB (_unop_tran__identity_uint64_uint8) // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint8) ( uint64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma 
omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
simd-7.c
/* { dg-do run } */ /* { dg-additional-options "-msse2" { target sse2_runtime } } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ extern void abort (); int a[1024] __attribute__((aligned (32))) = { 1 }; int b[1024] __attribute__((aligned (32))) = { 1 }; int k, m; struct U { int u; }; struct V { int v; }; __attribute__((noinline, noclone)) int foo (int *p) { int i, s = 0; struct U u; struct V v; #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \ linear(i) reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort (); return s; } __attribute__((noinline, noclone)) int bar (int *p) { int i, s = 0; struct U u; struct V v; #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \ reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort (); return s; } int main () { #if __SIZEOF_INT__ >= 4 int i; k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } int s = foo (b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort (); } if (k != 4 + 3 * 1024 || s != 1596127) abort (); k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } s = bar (b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort (); } if (k != 4 + 3 * 1024 || s != 1596127) abort (); #endif return 0; }
/* { dg-do run } */ /* { dg-additional-options "-msse2" { target sse2_runtime } } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ extern void abort(); int a[1024] __attribute__((aligned(32))) = { 1 }; int b[1024] __attribute__((aligned(32))) = { 1 }; int k, m; struct U { int u; }; struct V { int v; }; __attribute__((noinline, noclone)) int foo(int *p) { int i, s = 0; struct U u; struct V v; linear(i) reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort(); return s; } __attribute__((noinline, noclone)) int bar(int *p) { int i, s = 0; struct U u; struct V v; reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort(); return s; } int main() { #if __SIZEOF_INT__ >= 4 int i; k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } int s = foo(b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort(); } if (k != 4 + 3 * 1024 || s != 1596127) abort(); k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } s = bar(b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort(); } if (k != 4 + 3 * 1024 || s != 1596127) abort(); #endif return 0; }
/* { dg-do run } */ /* { dg-additional-options "-msse2" { target sse2_runtime } } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ extern void abort(); int a[1024] __attribute__((aligned(32))) = { 1 }; int b[1024] __attribute__((aligned(32))) = { 1 }; int k, m; struct U { int u; }; struct V { int v; }; __attribute__((noinline, noclone)) int foo(int *p) { int i, s = 0; struct U u; struct V v; #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \ linear(i) reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort(); return s; } __attribute__((noinline, noclone)) int bar(int *p) { int i, s = 0; struct U u; struct V v; #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \ reduction(+:s) lastprivate(u, v) for (i = 0; i < 1024; i++) { int *q = &i; a[i] *= p[i]; u.u = p[i] + k; k += m + 1; v.v = p[i] + k; s += p[i] + k; } if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024) abort(); return s; } int main() { #if __SIZEOF_INT__ >= 4 int i; k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } int s = foo(b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort(); } if (k != 4 + 3 * 1024 || s != 1596127) abort(); k = 4; m = 2; for (i = 0; i < 1024; i++) { a[i] = i - 512; b[i] = (i - 51) % 39; } s = bar(b); for (i = 0; i < 1024; i++) { if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]) abort(); } if (k != 4 + 3 * 1024 || s != 1596127) abort(); #endif return 0; }
GxB_Vector_Option_get.c
//------------------------------------------------------------------------------ // GxB_Vector_Option_get: get an option in a vector //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_Vector_Option_get // gets the current option of a vector ( GrB_Vector v, // vector to query GxB_Option_Field field, // option to query ... // return value of the vector option ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_Vector_Option_get (v, field, &value)") ; GB_RETURN_IF_NULL_OR_FAULTY (v) ; ASSERT_VECTOR_OK (v, "v to get option", GB0) ; //-------------------------------------------------------------------------- // get the option //-------------------------------------------------------------------------- va_list ap ; switch (field) { case GxB_BITMAP_SWITCH : { va_start (ap, field) ; double *bitmap_switch = va_arg (ap, double *) ; va_end (ap) ; GB_RETURN_IF_NULL (bitmap_switch) ; (*bitmap_switch) = (double) v->bitmap_switch ; } break ; case GxB_SPARSITY_CONTROL : { va_start (ap, field) ; int *sparsity_control = va_arg (ap, int *) ; va_end (ap) ; GB_RETURN_IF_NULL (sparsity_control) ; (*sparsity_control) = v->sparsity_control ; } break ; case GxB_SPARSITY_STATUS : { va_start (ap, field) ; int *sparsity = va_arg (ap, int *) ; va_end (ap) ; GB_RETURN_IF_NULL (sparsity) ; (*sparsity) = GB_sparsity ((GrB_Matrix) v) ; } break ; case GxB_FORMAT : { // a GrB_Vector is always stored by-column va_start (ap, field) ; GxB_Format_Value *format = va_arg (ap, GxB_Format_Value *) ; va_end (ap) ; GB_RETURN_IF_NULL (format) ; (*format) = GxB_BY_COL ; } break ; case GxB_IS_HYPER : // historical; use 
GxB_SPARSITY_STATUS instead { // a GrB_Vector is never hypersparse va_start (ap, field) ; bool *v_is_hyper = va_arg (ap, bool *) ; va_end (ap) ; GB_RETURN_IF_NULL (v_is_hyper) ; (*v_is_hyper) = false ; } break ; default : return (GrB_INVALID_VALUE) ; } #pragma omp flush return (GrB_SUCCESS) ; }
// ------------------------------------------------------------------------------ //GxB_Vector_Option_get:get an option in a vector // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2022, All Rights Reserved. // SPDX - License - Identifier:Apache - 2.0 // ------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_Vector_Option_get // gets the current option of a vector ( GrB_Vector v, //vector to query GxB_Option_Field field, //option to query ...// return value of the vector option ) { //-------------------------------------------------------------------------- //check inputs // -------------------------------------------------------------------------- GB_WHERE1("GxB_Vector_Option_get (v, field, &value)"); GB_RETURN_IF_NULL_OR_FAULTY(v); ASSERT_VECTOR_OK(v, "v to get option", GB0); //-------------------------------------------------------------------------- //get the option // -------------------------------------------------------------------------- va_list ap; switch (field) { case GxB_BITMAP_SWITCH: { va_start(ap, field); double *bitmap_switch = va_arg(ap, double *); va_end(ap); GB_RETURN_IF_NULL(bitmap_switch); (*bitmap_switch) = (double)v->bitmap_switch; } break; case GxB_SPARSITY_CONTROL: { va_start(ap, field); int *sparsity_control = va_arg(ap, int *); va_end(ap); GB_RETURN_IF_NULL(sparsity_control); (*sparsity_control) = v->sparsity_control; } break; case GxB_SPARSITY_STATUS: { va_start(ap, field); int *sparsity = va_arg(ap, int *); va_end(ap); GB_RETURN_IF_NULL(sparsity); (*sparsity) = GB_sparsity((GrB_Matrix) v); } break; case GxB_FORMAT: { //a GrB_Vector is always stored by - column va_start(ap, field); GxB_Format_Value *format = va_arg(ap, GxB_Format_Value *); va_end(ap); GB_RETURN_IF_NULL(format); (*format) = GxB_BY_COL; } break; case GxB_IS_HYPER: //historical; use GxB_SPARSITY_STATUS instead { //a GrB_Vector is never 
hypersparse va_start(ap, field); bool *v_is_hyper = va_arg(ap, bool *); va_end(ap); GB_RETURN_IF_NULL(v_is_hyper); (*v_is_hyper) = false; } break; default: return (GrB_INVALID_VALUE); } return (GrB_SUCCESS); }
// ------------------------------------------------------------------------------ //GxB_Vector_Option_get:get an option in a vector // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2022, All Rights Reserved. // SPDX - License - Identifier:Apache - 2.0 // ------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_Vector_Option_get // gets the current option of a vector ( GrB_Vector v, //vector to query GxB_Option_Field field, //option to query ...// return value of the vector option ) { //-------------------------------------------------------------------------- //check inputs // -------------------------------------------------------------------------- GB_WHERE1("GxB_Vector_Option_get (v, field, &value)"); GB_RETURN_IF_NULL_OR_FAULTY(v); ASSERT_VECTOR_OK(v, "v to get option", GB0); //-------------------------------------------------------------------------- //get the option // -------------------------------------------------------------------------- va_list ap; switch (field) { case GxB_BITMAP_SWITCH: { va_start(ap, field); double *bitmap_switch = va_arg(ap, double *); va_end(ap); GB_RETURN_IF_NULL(bitmap_switch); (*bitmap_switch) = (double)v->bitmap_switch; } break; case GxB_SPARSITY_CONTROL: { va_start(ap, field); int *sparsity_control = va_arg(ap, int *); va_end(ap); GB_RETURN_IF_NULL(sparsity_control); (*sparsity_control) = v->sparsity_control; } break; case GxB_SPARSITY_STATUS: { va_start(ap, field); int *sparsity = va_arg(ap, int *); va_end(ap); GB_RETURN_IF_NULL(sparsity); (*sparsity) = GB_sparsity((GrB_Matrix) v); } break; case GxB_FORMAT: { //a GrB_Vector is always stored by - column va_start(ap, field); GxB_Format_Value *format = va_arg(ap, GxB_Format_Value *); va_end(ap); GB_RETURN_IF_NULL(format); (*format) = GxB_BY_COL; } break; case GxB_IS_HYPER: //historical; use GxB_SPARSITY_STATUS instead { //a GrB_Vector is never 
hypersparse va_start(ap, field); bool *v_is_hyper = va_arg(ap, bool *); va_end(ap); GB_RETURN_IF_NULL(v_is_hyper); (*v_is_hyper) = false; } break; default: return (GrB_INVALID_VALUE); } #pragma omp flush return (GrB_SUCCESS); }
convolutiondepthwise_3x3_int8.h
// SenseNets is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w 
= bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
// SenseNets is pleased to support the open source community by supporting ncnn available. // //Copyright(C) 2018 SenseNets Technology Ltd.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int w = bottom_blob.w; //int h = bottom_blob.h; 
//int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
// SenseNets is pleased to support the open source community by supporting ncnn available. // //Copyright(C) 2018 SenseNets Technology Ltd.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & 
opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
convolution_3x3_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } 
// interleave // src = 64-inch-outch // dst = 4b-8a-inch/8a-64-outch/4b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00 += 4; } } } } } static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) 
/ 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); 
float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); 
vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = 
vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 
asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = 
kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] 
\n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for 
(; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext 
v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); 
int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla 
v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] 
\n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = 
vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? 
vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = 
vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = 
vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Precompute the Winograd F(6x6,3x3) transformed weights for the fp16
// pack8-to-pack4 convolution path, then interleave them into the layout
// consumed by the dot-product kernels.
//
// kernel             : raw 3x3 weights, addressed as outch-inch-9 floats
// kernel_tm_pack8to4 : output, transformed and repacked weights (__fp16)
// inch               : input channel count (the interleave loops only pack
//                      full groups of 8 input channels -- assumes inch is a
//                      multiple of 8; TODO confirm with callers)
// outch              : output channel count (packed in groups of 8, then a
//                      tail group of 4; a remainder < 4 is not packed here)
static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the F(6,3) Winograd transform: maps one 3-tap filter row
    // to 8 transformed taps (full tile below is computed as G * g * G^T).
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g (8x3 intermediate, one column per filter row)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * G^T, yielding the 8x8 transformed tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 4b-8a-inch/8a-64-outch/4b
    kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32);

    int p = 0;
    // Pack output channels 8 at a time; this is also where the transformed
    // weights are narrowed from float to __fp16.
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);

        Mat g0 = kernel_tm_pack8to4.channel(p / 8);

        // k indexes the 64 positions of the 8x8 transformed tile.
        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00[4] = (__fp16)k4.row(q + i)[k];
                    g00[5] = (__fp16)k5.row(q + i)[k];
                    g00[6] = (__fp16)k6.row(q + i)[k];
                    g00[7] = (__fp16)k7.row(q + i)[k];
                    g00 += 8;
                }
            }
        }
    }
    // Tail: pack the remaining group of 4 output channels (outch % 8 == 4).
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);

        Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00 += 4;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) 3x3 stride-1 convolution, fp16 storage + arithmetic,
// pack8 input channels -> pack4 output channels. Pads input up to a 6n+2
// grid before the input transform.
static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh
+ 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); 
float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + 
tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; 
float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub 
%0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* 
kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] 
\n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor 
v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // 
%3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" 
"eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, 
v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] 
\n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); 
kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? 
vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = 
vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = 
vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2020 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat & kernel, Mat & kernel_tm_pack8to4, int inch, int outch) { //winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0 f, 0.0 f, 0.0 f}, {-2.0 f / 9, -2.0 f / 9, -2.0 f / 9}, {-2.0 f / 9, 2.0 f / 9, -2.0 f / 9}, {1.0 f / 90, 1.0 f / 45, 2.0 f / 45}, {1.0 f / 90, -1.0 f / 45, 2.0 f / 45}, {1.0 f / 45, 1.0 f / 90, 1.0 f / 180}, {1.0 f / 45, -1.0 f / 90, 1.0 f / 180}, {0.0 f, 0.0 f, 1.0 f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float *kernel0 = (const float *)kernel + p * inch * 9 + q * 9; float *kernel_tm0 = kernel_tm.channel(p).row(q); //transform kernel, transposed const float *k0 = kernel0; const float *k1 = kernel0 + 3; const float *k2 = kernel0 + 6; //h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } //v for (int j = 0; j < 8; j++) { float *tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + 
tmpp[2] * ktm[i][2]; } } } } //interleave // src = 64 - inch - outch // dst = 4 b - 8 a - inch / 8 a - 64 - outch / 4 b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t) 2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16 *g00 = g0.row < __fp16 > (k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16) k0.row(q + i)[k]; g00[1] = (__fp16) k1.row(q + i)[k]; g00[2] = (__fp16) k2.row(q + i)[k]; g00[3] = (__fp16) k3.row(q + i)[k]; g00[4] = (__fp16) k4.row(q + i)[k]; g00[5] = (__fp16) k5.row(q + i)[k]; g00[6] = (__fp16) k6.row(q + i)[k]; g00[7] = (__fp16) k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16 *g00 = g0.row < __fp16 > (k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16) k0.row(q + i)[k]; g00[1] = (__fp16) k1.row(q + i)[k]; g00[2] = (__fp16) k2.row(q + i)[k]; g00[3] = (__fp16) k3.row(q + i)[k]; g00 += 4; } } } } } static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel_tm, const Mat & _bias, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = 
top_blob.c; //pad to 6 n + 2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0. f, opt); const __fp16 *bias = _bias; //BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; //bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); //const float itm[8][8] = { //{1.0 f, 0.0 f, -5.25 f, 0.00 f, 5.25 f, 0.00 f, -1.0 f, 0.0 f}, // //{0.0 f, 1.0 f, 1.00 f, -4.25 f, -4.25 f, 1.00 f, 1.0 f, 0.0 f}, //{0.0 f, -1.0 f, 1.00 f, 4.25 f, -4.25 f, -1.00 f, 1.0 f, 0.0 f}, // //{0.0 f, 0.5 f, 0.25 f, -2.50 f, -1.25 f, 2.00 f, 1.0 f, 0.0 f}, //{0.0 f, -0.5 f, 0.25 f, 2.50 f, -1.25 f, -2.00 f, 1.0 f, 0.0 f}, // //{0.0 f, 2.0 f, 4.00 f, -2.50 f, -5.00 f, 0.50 f, 1.0 f, 0.0 f}, //{0.0 f, -2.0 f, 4.00 f, 2.50 f, -5.00 f, -0.50 f, 1.0 f, 0.0 f}, // //{0.0 f, -1.0 f, 0.00 f, 5.25 f, 0.00 f, -5.25 f, 0.0 f, 1.0 f} //}; //0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; //tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) 
{ const __fp16 *r0 = img0.row < const __fp16 > (i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25 f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25 f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); //tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; //tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25 f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25 f); //float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); //float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); //tmp[1][m] = tmp12a + tmp12b; //tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25 f), _r04, 1.25 f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5 f), _r03, 2.5 f), _r05, 2. f); //float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); //float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); //tmp[3][m] = tmp34a + tmp34b; //tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25 f), 4. f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2. 
f), _r03, 2.5 f), _r05, 0.5 f); //float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); //float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); //tmp[5][m] = tmp56a + tmp56b; //tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16 *r0_tm_0 = (__fp16 *) img0_tm + (i * w_tm / 8 + j) * 8; __fp16 *r0_tm_1 = r0_tm_0 + tiles * 8; __fp16 *r0_tm_2 = r0_tm_0 + tiles * 16; __fp16 *r0_tm_3 = r0_tm_0 + tiles * 24; __fp16 *r0_tm_4 = r0_tm_0 + tiles * 32; __fp16 *r0_tm_5 = r0_tm_0 + tiles * 40; __fp16 *r0_tm_6 = r0_tm_0 + tiles * 48; __fp16 *r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25 f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25 f); //r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; //r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25 f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25 f); //float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); //float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); //r0_tm[1] = tmp12a + tmp12b; //r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25 f), _tmp04, 1.25 f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5 f), 
_tmp03, 2.5 f), _tmp05, 2. f); //float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); //float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); //r0_tm[3] = tmp34a + tmp34b; //r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25 f), 4. f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2. f), _tmp03, 2.5 f), _tmp05, 0.5 f); //float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); //float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); //r0_tm[5] = tmp56a + tmp56b; //r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); //END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; //permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else //if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = 
bottom_blob_tm2.channel(r); //tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16 *tm2p = tm2.row < __fp16 > (i / 8); const __fp16 *r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { //transpose 8 x8 asm volatile ( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r" (r0), //%0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16 *tm2p = tm2.row < __fp16 > (i / 8 + (i % 8) / 4); const __fp16 *r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { //transpose 8 x4 asm volatile ( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r" (r0), //%0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16 *tm2p = tm2.row < __fp16 > (i / 8 + (i % 8) / 4 + i % 4); const __fp16 *r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile ( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r" (r0), //%0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); //permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch 
>> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16 *output0_tm = top_blob_tm.channel(p); __fp16 *output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8); const __fp16 *kptr = kernel01_tm.row < const __fp16 > (r); int nn = inch; //inch always > 0 asm volatile ( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, 
v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r" (nn), //%0 "=r"(output0_tm), //%1 "=r"(output1_tm), //%2 "=r"(r0), //%3 "=r"(kptr) // %4 : 
"0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8 + (i % 8) / 4); const __fp16 *kptr = kernel01_tm.row < const __fp16 > (r); int nn = inch; //inch always > 0 asm volatile ( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" 
"fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r" (nn), //%0 "=r"(output0_tm), //%1 "=r"(output1_tm), //%2 "=r"(r0), //%3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8 + (i % 8) / 4 + i % 4); const __fp16 *kptr = kernel01_tm.row < const __fp16 > (r); float16x8_t _sum0 = vdupq_n_f16((__fp16) 0. f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16 *output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = 
kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8); const __fp16 *kptr = kernel0_tm.row < const __fp16 > (r); int nn = inch; //inch always > 0 asm volatile ( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, 
v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r" (nn), //%0 "=r"(output0_tm), //%1 "=r"(r0), //%2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8 + (i % 8) / 4); const __fp16 *kptr = kernel0_tm.row < const __fp16 > (r); int nn = inch; //inch always > 0 asm volatile ( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm 
pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r" (nn), //%0 "=r"(output0_tm), //%1 "=r"(r0), //%2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16 *r0 = bb2.row < const __fp16 > (i / 8 + (i % 8) / 4 + i % 4); const __fp16 *kptr = kernel0_tm.row < const __fp16 > (r); float16x4_t _sum0 = vdup_n_f16((__fp16) 0. 
f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); //END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { //const float otm[6][8] = { //{1.0 f, 1.0 f, 1.0 f, 1.0 f, 1.0 f, 32.0 f, 32.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 2.0 f, -2.0 f, 16.0 f, -16.0 f, 0.0 f}, //{0.0 f, 1.0 f, 1.0 f, 4.0 f, 4.0 f, 8.0 f, 8.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 8.0 f, -8.0 f, 4.0 f, -4.0 f, 0.0 f}, //{0.0 f, 1.0 f, 1.0 f, 16.0 f, 16.0 f, 2.0 f, 2.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 32.0 f, -32.0 f, 1.0 f, -1.0 f, 1.0 f} //}; //0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = 
top_blob_bordered.channel(p); //const float bias0 = bias ? bias[p] : 0. f; float16x4_t _bias0 = bias ? vld1_f16((const __fp16 *)bias + p * 4) : vdup_n_f16(0. f); __fp16 tmp[6][8][4]; //tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { //top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16 *output0_tm_0 = (const __fp16 *)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16 *output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16 *output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16 *output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16 *output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16 *output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16 *output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16 *output0_tm_7 = output0_tm_0 + tiles * 28; __fp16 *output0 = out0.row < __fp16 > (i * 6) + (j * 6) * 4; //TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); //float tmp024a = output0_tm[1] + output0_tm[2]; //float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); //float tmp024b = output0_tm[3] + output0_tm[4]; //float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); //float tmp024c = output0_tm[5] + output0_tm[6]; //float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32. 
f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4. f), _tmp024c, 8. f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16. f), _tmp024c, 2. f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); //tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; //tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; //tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2. f), _tmp135c, 16. f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8. f), _tmp135c, 4. f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32. f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); //tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; //tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; //tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); //float tmp024a = tmp0[1] + tmp0[2]; //float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); //float tmp024b = tmp0[3] + tmp0[4]; //float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, 
_tmp06); //float tmp024c = tmp0[5] + tmp0[6]; //float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32. f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4. f), _tmp024c, 8. f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16. f), _tmp024c, 2. f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); //output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; //output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; //output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2. f), _tmp135c, 16. f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8. f), _tmp135c, 4. f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32. f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); //output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; //output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; //output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } //END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
deconv_2d.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_OPS_DECONV_2D_H_ #define MACE_OPS_DECONV_2D_H_ #include "mace/core/types.h" namespace mace { namespace ops { enum FrameworkType { TENSORFLOW = 0, CAFFE = 1, }; template <typename T> void CropPadOut(const T *input, const index_t *in_shape, const index_t *out_shape, const index_t pad_h, const index_t pad_w, T *output) { const index_t batch = in_shape[0]; const index_t channel = in_shape[1]; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; #pragma omp parallel for collapse(3) for (int i = 0; i < batch; ++i) { for (int j = 0; j < channel; ++j) { for (int k = 0; k < out_height; ++k) { const T *input_base = input + ((i * channel + j) * in_height + (k + pad_h)) * in_width; T *output_base = output + ((i * channel + j) * out_height + k)* out_width; memcpy(output_base, input_base + pad_w, out_width * sizeof(T)); } } } } } // namespace ops } // namespace mace #endif // MACE_OPS_DECONV_2D_H_
// Copyright 2018 Xiaomi, Inc.All rights reserved. // //Licensed under the Apache License, Version 2.0(the "License"); //you may not use this file except in compliance with the License. // You may obtain a copy of the License at // //http://www.apache.org / licenses / LICENSE - 2.0 // //Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_OPS_DECONV_2D_H_ #define MACE_OPS_DECONV_2D_H_ #include "mace/core/types.h" namespace mace { namespace ops { enum FrameworkType { TENSORFLOW = 0, CAFFE = 1, }; template < typename T > void CropPadOut(const T * input, const index_t * in_shape, const index_t * out_shape, const index_t pad_h, const index_t pad_w, T * output) { const index_t batch = in_shape[0]; const index_t channel = in_shape[1]; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; for (int i = 0; i < batch; ++i) { for (int j = 0; j < channel; ++j) { for (int k = 0; k < out_height; ++k) { const T *input_base = input + ((i * channel + j) * in_height + (k + pad_h)) * in_width; T *output_base = output + ((i * channel + j) * out_height + k) * out_width; memcpy(output_base, input_base + pad_w, out_width * sizeof(T)); } } } } } //namespace ops } //namespace mace #endif /* // MACE_OPS_DECONV_2D_H_ */
// Copyright 2018 Xiaomi, Inc.All rights reserved. // //Licensed under the Apache License, Version 2.0(the "License"); //you may not use this file except in compliance with the License. // You may obtain a copy of the License at // //http://www.apache.org / licenses / LICENSE - 2.0 // //Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_OPS_DECONV_2D_H_ #define MACE_OPS_DECONV_2D_H_ #include "mace/core/types.h" namespace mace { namespace ops { enum FrameworkType { TENSORFLOW = 0, CAFFE = 1, }; template < typename T > void CropPadOut(const T * input, const index_t * in_shape, const index_t * out_shape, const index_t pad_h, const index_t pad_w, T * output) { const index_t batch = in_shape[0]; const index_t channel = in_shape[1]; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; #pragma omp parallel for collapse(3) for (int i = 0; i < batch; ++i) { for (int j = 0; j < channel; ++j) { for (int k = 0; k < out_height; ++k) { const T *input_base = input + ((i * channel + j) * in_height + (k + pad_h)) * in_width; T *output_base = output + ((i * channel + j) * out_height + k) * out_width; memcpy(output_base, input_base + pad_w, out_width * sizeof(T)); } } } } } //namespace ops } //namespace mace #endif /* // MACE_OPS_DECONV_2D_H_ */
bezier_classical_post_utility.h
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED ) #define KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" #ifdef ISOGEOMETRIC_USE_MPI #include "mpi.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "includes/ublas_interface.h" #include "includes/deprecated_variables.h" #include "includes/legacy_structural_app_vars.h" #include "spaces/ublas_space.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" #include "utilities/auto_collapse_spatial_binning.h" #include "custom_utilities/iga_define.h" #include "custom_geometries/isogeometric_geometry.h" #include "custom_utilities/isogeometric_utility.h" #include "custom_utilities/isogeometric_post_utility.h" #include "isogeometric_application/isogeometric_application.h" //#define DEBUG_LEVEL1 //#define DEBUG_LEVEL2 //#define DEBUG_MULTISOLVE //#define DEBUG_GENERATE_MESH #define ENABLE_PROFILING namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ template<class T> void AddToModelPart(ModelPart& rModelPart, typename T::Pointer pE); template<> void AddToModelPart<Element>(ModelPart& rModelPart, typename Element::Pointer pE) { rModelPart.AddElement(pE); } template<> void AddToModelPart<Condition>(ModelPart& rModelPart, typename Condition::Pointer pC) { rModelPart.AddCondition(pC); } ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** A simple utility to export directly the FEM mesh out from isogeometric Bezier mesh. 
Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements may be generated. One shall carefully use this utility for large problem. Previously, this class is named IsogeometricClassicalPostUtility. */ class BezierClassicalPostUtility : public IsogeometricPostUtility { public: ///@name Type Definitions ///@{ typedef boost::numeric::ublas::vector<double> ValuesContainerType; typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType; typedef typename ModelPart::NodesContainerType NodesArrayType; typedef typename ModelPart::ElementsContainerType ElementsArrayType; typedef typename ModelPart::ConditionsContainerType ConditionsArrayType; typedef typename Element::GeometryType GeometryType; typedef typename GeometryType::PointType NodeType; typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType; typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType; typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType; typedef typename NodeType::DofsContainerType DofsContainerType; typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType; typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType; typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType; typedef std::size_t IndexType; /// Pointer definition of BezierClassicalPostUtility KRATOS_CLASS_POINTER_DEFINITION(BezierClassicalPostUtility); ///@} ///@name Life Cycle ///@{ /// Default constructor. BezierClassicalPostUtility(ModelPart::Pointer pModelPart) : mpModelPart(pModelPart) { } /// Destructor. 
virtual ~BezierClassicalPostUtility() { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Generate the post model_part from reference model_part /// Deprecated void GenerateModelPart(ModelPart::Pointer pModelPartPost, PostElementType postElementType) { #ifdef ENABLE_PROFILING double start_compute = OpenMPUtils::GetCurrentTime(); #endif #ifdef DEBUG_LEVEL1 std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl; #endif ElementsArrayType& pElements = mpModelPart->Elements(); #ifdef DEBUG_LEVEL1 std::cout << "Retrieved pElements" << std::endl; #endif std::string NodeKey = std::string("Node"); //select the correct post element type std::string element_name; if(postElementType == _TRIANGLE_) element_name = std::string("KinematicLinear2D3N"); else if(postElementType == _QUADRILATERAL_) element_name = std::string("KinematicLinear2D4N"); else if(postElementType == _TETRAHEDRA_) element_name = std::string("KinematicLinear3D4N"); else if(postElementType == _HEXAHEDRA_) element_name = std::string("KinematicLinear3D8N"); else KRATOS_THROW_ERROR(std::logic_error, "This element type is not supported for isogeometric post-processing", __FUNCTION__); if(!KratosComponents<Element>::Has(element_name)) { std::stringstream buffer; buffer << "Element " << element_name << " is not registered in Kratos."; buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Element const& rCloneElement = KratosComponents<Element>::Get(element_name); IndexType NodeCounter = 0; IndexType ElementCounter = 0; boost::progress_display show_progress( pElements.size() ); for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl; continue; } int Dim = 
(*it)->GetGeometry().WorkingSpaceDimension(); IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(Dim) #endif //get the properties Properties::Pointer pDummyProperties = (*it)->pGetProperties(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(*pDummyProperties) #endif // generate list of nodes if(Dim == 1) { // TODO } else if(Dim == 2) { IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." << std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); #ifdef DEBUG_GENERATE_MESH // if(NodeCounter == 585 || NodeCounter == 588 || NodeCounter == 589) if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(pModelPartPost->GetBufferSize()); pModelPartPost->AddNode(pNewNode); mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = (*it)->Id(); } } //for correct mapping to element, the repetitive node is allowed. // pModelPartPost->Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Nodes().size()) std::cout << "Generating Elements..." 
<< std::endl; #endif // create and add element // Element::NodesArrayType temp_element_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // if(postElementType == _TRIANGLE_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // Element::Pointer NewElement1 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement1); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // Element::Pointer NewElement2 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement2); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // else if(postElementType == _QUADRILATERAL_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, 
NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; if(postElementType == _TRIANGLE_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4}); connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3}); } else if(postElementType == _QUADRILATERAL_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3}); } } } ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>( connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey); for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2) { pModelPartPost->AddElement(*it2); mOldToNewElements[(*it)->Id()].insert((*it2)->Id()); } pModelPartPost->Elements().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Elements().size()) #endif } else if(Dim == 3) { IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH((*it)->Id()) 
KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." << std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(pModelPartPost->GetBufferSize()); pModelPartPost->AddNode(pNewNode); mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = (*it)->Id(); } } } //for correct mapping to element, the repetitive node is allowed. // pModelPartPost->Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Nodes().size()) std::cout << "Generating Elements..." 
<< std::endl; #endif // create and add element // Element::NodesArrayType temp_element_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // if(postElementType == _TETRAHEDRA_) // { // // TODO: check if jacobian checking is necessary // } // else if(postElementType == _HEXAHEDRA_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node5, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node6, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node8, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node7, NodeKey).base())); // Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < 
NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { for(k = 0; k < NumDivision3; ++k) { IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node5 = Node1 + 1; IndexType Node6 = Node2 + 1; IndexType Node7 = Node3 + 1; IndexType Node8 = Node4 + 1; if(postElementType == _TETRAHEDRA_) { // TODO: check if creating Tetrahedra is correct connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4}); connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3}); } else if(postElementType == _HEXAHEDRA_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7}); } } } } ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>( connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey); for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2) { pModelPartPost->AddElement(*it2); mOldToNewElements[(*it)->Id()].insert((*it2)->Id()); } pModelPartPost->Elements().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Elements().size()) #endif } ++show_progress; } #ifdef ENABLE_PROFILING double end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "GeneratePostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl; #else std::cout << "GeneratePostModelPart completed" << std::endl; #endif std::cout << NodeCounter << " nodes and " << ElementCounter << " elements are created" << std::endl; } /// Generate the post model_part from reference model_part /// this is the 
improved version of GenerateModelPart
/// which uses template function to generate post Elements for both Element and Condition
///
/// Builds a linear post-processing mesh in pModelPartPost from every element of the
/// reference model part; when generate_for_condition is true, a second pass does the
/// same for conditions. Node/element ids are assigned sequentially via local counters.
void GenerateModelPart2(ModelPart::Pointer pModelPartPost, const bool& generate_for_condition)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
    // NOTE(review): debug label says "::GenerateModelPart" although this is GenerateModelPart2
    // (runtime string left untouched here)
    std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif

    ElementsArrayType& pElements = mpModelPart->Elements();
    ConditionsArrayType& pConditions = mpModelPart->Conditions();

    std::string NodeKey = std::string("Node");

    // running counters for ids of generated post nodes/elements
    IndexType NodeCounter = 0;
    IndexType ElementCounter = 0;
    boost::progress_display show_progress( pElements.size() );
    std::vector<std::size_t> dummy_ids; // index collection not requested (get_indices == false below)
    for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        // This is wrong, we will not kill the IS_INACTIVE elements
        // TODO: to be deleted
        // if((*it)->GetValue( IS_INACTIVE ))
        // {
        ////     std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
        //     ++show_progress;
        //     continue;
        // }

        if((*it)->pGetGeometry() == 0)
            KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at element", (*it)->Id())

        int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
        int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
        IndexType NodeCounter_old = NodeCounter; // first post node id (minus 1) of this element's patch

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(Dim)
        KRATOS_WATCH(ReducedDim)
#endif

        //select the correct post element type
        std::string element_name;
        if((Dim == 2) && (ReducedDim == 2))
        {
            element_name = std::string("KinematicLinear2D4N");
        }
        else if((Dim == 3) && (ReducedDim == 2))
        {
            element_name = std::string("KinematicLinear2D4N");
        }
        else if((Dim == 3) && (ReducedDim == 3))
        {
            element_name = std::string("KinematicLinear3D8N");
        }
        else
        {
            std::stringstream ss;
            ss << "Invalid dimension of ";
            ss << typeid(*(*it)).name();
            ss << ", Dim = " << Dim;
            ss << ", ReducedDim = " << ReducedDim;
            KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
        }

        if(!KratosComponents<Element>::Has(element_name))
        {
            std::stringstream buffer;
            buffer << "Element " << element_name << " is not registered in Kratos.";
            buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

        // type tag 1 == Element; no nodal variable transfer, no index collection
        GenerateForOneEntity<Element, ElementsArrayType, 1>(*pModelPartPost,
                *(*it), rCloneElement, NodeCounter_old, NodeCounter, ElementCounter, NodeKey,
                false, dummy_ids, dummy_ids, false);

        ++show_progress;
    }
    KRATOS_WATCH(ElementCounter)

#ifdef DEBUG_LEVEL1
    std::cout << "Done generating for elements" << std::endl;
#endif

    IndexType ConditionCounter = 0;
    if (generate_for_condition)
    {
        boost::progress_display show_progress2( pConditions.size() );
        for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            // This is wrong, we will not kill the IS_INACTIVE conditions
            // TODO: to be deleted
            // if((*it)->GetValue( IS_INACTIVE ))
            // {
            ////     std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
            //     ++show_progress2;
            //     continue;
            // }

            if((*it)->pGetGeometry() == 0)
                KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at condition", (*it)->Id())

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
            int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
            IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
            KRATOS_WATCH(Dim)
            KRATOS_WATCH(ReducedDim)
#endif

            //select the correct post condition type
            std::string condition_name;
            if(Dim == 3 && ReducedDim == 1)
                condition_name = std::string("LineForce3D2N");
            else if(Dim == 3 && ReducedDim == 2)
                condition_name =
                        std::string("FaceForce3D4N");
            else
            {
                // unsupported dimension combination: the condition is skipped, not an error
                std::stringstream ss;
                ss << "Invalid dimension of ";
                ss << typeid(*(*it)).name();
                ss << ", Dim = " << Dim;
                ss << ", ReducedDim = " << ReducedDim;
                ss << ". Condition " << (*it)->Id() << " will be skipped.";
                // KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
                continue;
            }

            if(!KratosComponents<Condition>::Has(condition_name))
            {
                std::stringstream buffer;
                buffer << "Condition " << condition_name << " is not registered in Kratos.";
                buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
                KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
            }

            Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

            // type tag 2 == Condition
            GenerateForOneEntity<Condition, ConditionsArrayType, 2>(*pModelPartPost,
                    *(*it), rCloneCondition, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey,
                    false, dummy_ids, dummy_ids, false);

            ++show_progress2;
        }
        KRATOS_WATCH(ConditionCounter)
    }

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "GeneratePostModelPart2 completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
    std::cout << "GeneratePostModelPart2 completed" << std::endl;
#endif
    std::cout << NodeCounter << " nodes and " << ElementCounter << " elements";
    if (generate_for_condition)
        std::cout << ", " << ConditionCounter << " conditions";
    std::cout << " are created" << std::endl;
}

// Generate the post model_part from reference model_part
// this is the improved version of GenerateModelPart
// which uses template function to generate post Elements for both Element and Condition
// this version used a collapsing utility to collapse nodes automatically
void GenerateModelPart2AutoCollapse(ModelPart::Pointer pModelPartPost,
        double dx, double dy, double dz, double tol)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
    std::cout << typeid(*this).name() <<
"::GenerateModelPart" << std::endl; #endif AutoCollapseSpatialBinning collapse_util(0.0, 0.0, 0.0, dx, dy, dz, tol); ElementsArrayType& pElements = mpModelPart->Elements(); ConditionsArrayType& pConditions = mpModelPart->Conditions(); std::string NodeKey = std::string("Node"); IndexType NodeCounter = 0; IndexType ElementCounter = 0; boost::progress_display show_progress( pElements.size() ); VectorMap<IndexType, IndexType> MapToCollapseNode; for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl; ++show_progress; continue; } int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(Dim) KRATOS_WATCH(ReducedDim) #endif //select the correct post element type std::string element_name; if(Dim == 2 && ReducedDim == 2) element_name = std::string("KinematicLinear2D4N"); else if(Dim == 3 && ReducedDim == 3) element_name = std::string("KinematicLinear3D8N"); else { std::stringstream ss; ss << "Invalid dimension of "; ss << typeid(*(*it)).name(); ss << ", Dim = " << Dim; ss << ", ReducedDim = " << ReducedDim; KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__); } if(!KratosComponents<Element>::Has(element_name)) { std::stringstream buffer; buffer << "Element " << element_name << " is not registered in Kratos."; buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Element const& rCloneElement = KratosComponents<Element>::Get(element_name); GenerateForOneEntityAutoCollapse<Element, ElementsArrayType, 1>(collapse_util, *pModelPartPost, *(*it), rCloneElement, 
MapToCollapseNode, NodeCounter_old, NodeCounter, ElementCounter, NodeKey); ++show_progress; } #ifdef DEBUG_LEVEL1 std::cout << "Done generating for elements" << std::endl; #endif IndexType ConditionCounter = 0; boost::progress_display show_progress2( pConditions.size() ); for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl; ++show_progress2; continue; } int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(typeid((*it)->GetGeometry()).name()) KRATOS_WATCH(Dim) KRATOS_WATCH(ReducedDim) #endif //select the correct post condition type std::string condition_name; if(Dim == 3 && ReducedDim == 1) condition_name = std::string("LineForce3D2N"); else if(Dim == 3 && ReducedDim == 2) condition_name = std::string("FaceForce3D4N"); else { std::stringstream ss; ss << "Invalid dimension of "; ss << typeid(*(*it)).name(); ss << ", Dim = " << Dim; ss << ", ReducedDim = " << ReducedDim; KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__); } if(!KratosComponents<Condition>::Has(condition_name)) { std::stringstream buffer; buffer << "Condition " << condition_name << " is not registered in Kratos."; buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name); GenerateForOneEntityAutoCollapse<Condition, ConditionsArrayType, 2>(collapse_util, *pModelPartPost, *(*it), rCloneCondition, MapToCollapseNode, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey); ++show_progress2; } 
#ifdef ENABLE_PROFILING double end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "Generate PostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl; #else std::cout << "Generate PostModelPart completed" << std::endl; #endif std::cout << NodeCounter << " nodes and " << ElementCounter << " elements" << ", " << ConditionCounter << " conditions are created" << std::endl; } /** * Utility function to generate elements/conditions for element/condition * if TEntityType==Element, type must be 1; if T==Condition, type is 2 */ template<class TEntityType, class TEntityContainerType, std::size_t type> void GenerateForOneEntity(ModelPart& rModelPart, TEntityType& rE, TEntityType const& rSample, IndexType NodeCounter_old, IndexType& NodeCounter, IndexType& EntityCounter, const std::string& NodeKey, const bool& transfer_nodal_var, std::vector<std::size_t>& node_ids, std::vector<std::size_t>& element_ids, const bool& get_indices) { // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension(); int ReducedDim = rE.GetGeometry().Dimension(); //get the properties Properties::Pointer pDummyProperties = rE.pGetProperties(); #ifdef DEBUG_LEVEL1 std::cout << "Generating for " << rE.Info() << std::endl; KRATOS_WATCH(*pDummyProperties) KRATOS_WATCH(EntityCounter) #endif // generate list of nodes if(ReducedDim == 1) { // TODO } else if(ReducedDim == 2) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; Vector shape_values; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." 
<< std::endl;
#endif

        // create and add nodes: sample the geometry on a uniform
        // (NumDivision1+1) x (NumDivision2+1) grid in local coordinates
        p_ref[2] = 0.0;
        for(i = 0; i <= NumDivision1; ++i)
        {
            p_ref[0] = ((double) i) / NumDivision1;
            for(j = 0; j <= NumDivision2; ++j)
            {
                p_ref[1] = ((double) j) / NumDivision2;

                p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);

                NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                pNewNode->SetId(++NodeCounter);

                // Giving model part's variables list to the node
                pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());

                //set buffer size
                pNewNode->SetBufferSize(rModelPart.GetBufferSize());

                rModelPart.AddNode(pNewNode);

                // only elements (type 1) record the node -> local-coordinates/element maps
                if(type == 1)
                {
                    mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                    mNodeToElement(pNewNode->Id()) = rE.Id();
                }

                // optionally interpolate nodal solution-step variables from the
                // source geometry to the new node using the shape functions at p_ref
                if (transfer_nodal_var)
                {
                    shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                    VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                    for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                    {
                        if (typeid(*it) == typeid(Variable<double>))
                        {
                            const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                            double value = 0.0;
                            for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                            pNewNode->GetSolutionStepValue(my_variable) = value;
                        }
                        else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                        {
                            const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                            array_1d<double, 3> value;
                            noalias(value) = ZeroVector(3);
                            for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                            pNewNode->GetSolutionStepValue(my_variable) = value;
                        }
                    }
                }

                if (get_indices)
                    node_ids.push_back(pNewNode->Id());
            }
        }

        //for correct mapping to element, the repetitive node is allowed.
//        rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rModelPart.Nodes().size())
        if(type == 1)
            std::cout << "Generating Elements..." << std::endl;
        else
            std::cout << "Generating Conditions..." << std::endl;
#endif

        // create and add element
//        typename T::NodesArrayType temp_nodes;
//        for(i = 0; i < NumDivision1; ++i)
//        {
//            for(j = 0; j < NumDivision2; ++j)
//            {
//                int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
//                int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
//                int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
//                int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
//                // TODO: check if jacobian checking is necessary
//                temp_nodes.clear();
//                temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
//                temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
//                temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
//                temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
//                int NewEntityId = ++EntityCounter;
//                // int NewEntityId = rE.Id(); ++EntityCounter;
//                typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
//                AddToModelPart<TEntityType>(rModelPart, NewEntity);
//                if(type == 1)
//                    mOldToNewElements[rE.Id()].insert(NewEntityId);
//                else if(type == 2)
//                    mOldToNewConditions[rE.Id()].insert(NewEntityId);
//            }
//        }
        // build quadrilateral connectivities (Node4/Node3 swapped to get consistent orientation)
        std::vector<std::vector<IndexType> > connectivities;
        for(i = 0; i < NumDivision1; ++i)
        {
            for(j = 0; j < NumDivision2; ++j)
            {
                IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
            }
        }

        TEntityContainerType pNewEntities =
            IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
                connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

        for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
        {
            AddToModelPart<TEntityType>(rModelPart, *it2);
            // remember which post entities each original entity maps to
            if(type == 1)
                mOldToNewElements[rE.Id()].insert((*it2)->Id());
            else if(type == 2)
                mOldToNewConditions[rE.Id()].insert((*it2)->Id());
        }

        if(type == 1)
            rModelPart.Elements().Unique();
        else if(type == 2)
            rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
        if(type == 1)
            KRATOS_WATCH(rModelPart.Elements().size())
        else
            KRATOS_WATCH(rModelPart.Conditions().size())
#endif

        if (get_indices)
        {
            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                element_ids.push_back((*it2)->Id());
            }
        }
    }
    else if(ReducedDim == 3)
    {
        IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
        IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
        IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
        IndexType i, j, k;
        CoordinatesArrayType p_ref;
        CoordinatesArrayType p;
        Vector shape_values;

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rE.Id())
        KRATOS_WATCH(NumDivision1)
        KRATOS_WATCH(NumDivision2)
        KRATOS_WATCH(NumDivision3)
        std::cout << "Generating Nodes..."
<< std::endl;
#endif

        // create and add nodes: sample the volume geometry on a uniform
        // (NumDivision1+1) x (NumDivision2+1) x (NumDivision3+1) grid in local coordinates
        for(i = 0; i <= NumDivision1; ++i)
        {
            p_ref[0] = ((double) i) / NumDivision1;
            for(j = 0; j <= NumDivision2; ++j)
            {
                p_ref[1] = ((double) j) / NumDivision2;
                for(k = 0; k <= NumDivision3; ++k)
                {
                    p_ref[2] = ((double) k) / NumDivision3;

                    p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);

                    NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                    pNewNode->SetId(++NodeCounter);

#ifdef DEBUG_GENERATE_MESH
                    // NOTE(review): condition is true for every node (NodeCounter > 0 here);
                    // possibly a specific node id was intended — confirm
                    if(NodeCounter)
                    {
                        std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                    }
#endif

                    // Giving model part's variables list to the node
                    pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());

                    //set buffer size
                    pNewNode->SetBufferSize(rModelPart.GetBufferSize());

                    rModelPart.AddNode(pNewNode);

                    // only elements (type 1) record the node -> local-coordinates/element maps
                    if(type == 1)
                    {
                        mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                        mNodeToElement(pNewNode->Id()) = rE.Id();
                    }

                    // optionally interpolate nodal solution-step variables to the new node
                    if (transfer_nodal_var)
                    {
                        shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                        VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                        for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                        {
                            if (typeid(*it) == typeid(Variable<double>))
                            {
                                const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                                double value = 0.0;
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                            else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                            {
                                const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                                array_1d<double, 3> value;
                                noalias(value) = ZeroVector(3);
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                        }
                    }

                    if (get_indices)
                        node_ids.push_back(pNewNode->Id());
                }
            }
        }

        //for correct mapping to element, the repetitive node is allowed.
//        rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rModelPart.Nodes().size())
        if(type == 1)
            std::cout << "Generating Elements..." << std::endl;
        else
            std::cout << "Generating Conditions..." << std::endl;
#endif

        // create and add element
//        typename T::NodesArrayType temp_nodes;
//        for(i = 0; i < NumDivision1; ++i)
//        {
//            for(j = 0; j < NumDivision2; ++j)
//            {
//                for(k = 0; k < NumDivision3; ++k)
//                {
//                    int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
//                    int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
//                    int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
//                    int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
//                    int Node5 = Node1 + 1;
//                    int Node6 = Node2 + 1;
//                    int Node7 = Node3 + 1;
//                    int Node8 = Node4 + 1;
//                    // TODO: check if jacobian checking is necessary
//                    temp_nodes.clear();
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node5, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node6, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node8, NodeKey).base()));
//                    temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node7, NodeKey).base()));
//                    int NewEntityId = ++EntityCounter;
//                    typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
//                    AddToModelPart<TEntityType>(rModelPart, NewEntity);
//                    if(type == 1)
//                        mOldToNewElements[rE.Id()].insert(NewEntityId);
//                    else if(type == 2)
//                        mOldToNewConditions[rE.Id()].insert(NewEntityId);
//                }
//            }
//        }
        // build hexahedral connectivities (node order swaps 3<->4 and 7<->8 for orientation)
        std::vector<std::vector<IndexType> > connectivities;
        for(i = 0; i < NumDivision1; ++i)
        {
            for(j = 0; j < NumDivision2; ++j)
            {
                for(k = 0; k < NumDivision3; ++k)
                {
                    IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                    IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                    IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                    IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                    IndexType Node5 = Node1 + 1;
                    IndexType Node6 = Node2 + 1;
                    IndexType Node7 = Node3 + 1;
                    IndexType Node8 = Node4 + 1;
                    connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
                }
            }
        }

        TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
            connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

        for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
        {
            AddToModelPart<TEntityType>(rModelPart, *it2);
            // remember which post entities each original entity maps to
            if(type == 1)
                mOldToNewElements[rE.Id()].insert((*it2)->Id());
            else if(type == 2)
                mOldToNewConditions[rE.Id()].insert((*it2)->Id());
        }

        if(type == 1)
            rModelPart.Elements().Unique();
        else if(type == 2)
            rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
        if(type == 1)
            KRATOS_WATCH(rModelPart.Elements().size())
        else
            KRATOS_WATCH(rModelPart.Conditions().size())
#endif

        if (get_indices)
        {
            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                element_ids.push_back((*it2)->Id());
            }
        }
    }
}

/**
 * Utility function to generate elements/conditions for element/condition.
* This uses a collapse utility to automatically merge the coincident nodes * if T==Element, type must be 1; otherwise type=2 */ template<class TEntityType, class TEntityContainerType, std::size_t type> void GenerateForOneEntityAutoCollapse(AutoCollapseSpatialBinning& collapse_util, ModelPart& rModelPart, TEntityType& rE, TEntityType const& rSample, VectorMap<IndexType, IndexType>& rMapToCollapseNode, IndexType NodeCounter_old, IndexType& NodeCounter, IndexType& EntityCounter, const std::string& NodeKey) { // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension(); int ReducedDim = rE.GetGeometry().Dimension(); //get the properties Properties::Pointer pDummyProperties = rE.pGetProperties(); #ifdef DEBUG_LEVEL1 if(type == 1) std::cout << "Generating for element " << rE.Id() << std::endl; else std::cout << "Generating for condition " << rE.Id() << std::endl; KRATOS_WATCH(*pDummyProperties) #endif // generate list of nodes if(ReducedDim == 1) { // TODO } else if(ReducedDim == 2) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; connectivities.push_back(std::vector<IndexType>{ rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3]}); } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, 
pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif } else if(ReducedDim == 3) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rE.Id()) KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node5], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node6], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node8], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node7], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < 
NumDivision2; ++j) { for(k = 0; k < NumDivision3; ++k) { IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node5 = Node1 + 1; IndexType Node6 = Node2 + 1; IndexType Node7 = Node3 + 1; IndexType Node8 = Node4 + 1; connectivities.push_back(std::vector<IndexType>{ rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3], rMapToCollapseNode[Node5], rMapToCollapseNode[Node6], rMapToCollapseNode[Node8], rMapToCollapseNode[Node7]}); } } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif } } // Synchronize the activation between model_parts void SynchronizeActivation(ModelPart::Pointer pModelPartPost) { ElementsArrayType& pElements = mpModelPart->Elements(); for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()]; for(std::set<IndexType>::iterator it2 
= NewElements.begin(); it2 != NewElements.end(); ++it2)
            {
                // copy the activation flag of the source element to each post element generated from it
                pModelPartPost->GetElement(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
            }
        }
        ConditionsArrayType& pConditions = mpModelPart->Conditions();
        for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            // mOldToNewConditions maps an old condition id to the ids of the post conditions created from it
            std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
            {
                pModelPartPost->GetCondition(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
            }
        }
    }

    /// Transfer a non-historical (elemental) variable: copy the value stored on each
    /// element of the reference model_part onto every post element generated from it.
    /// @param rThisVariable   variable to copy (any type usable with GetValue/SetValue)
    /// @param pModelPartPost  destination (post) model_part
    template<class TVariableType>
    void TransferElementalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
    {
        ElementsArrayType& pElements = mpModelPart->Elements();
        for(typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            // mOldToNewElements maps an old element id to the ids of the post elements created from it
            std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
            {
                pModelPartPost->GetElement(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
            }
        }
    }

    /// Transfer a non-historical (conditional) variable: copy the value stored on each
    /// condition of the reference model_part onto every post condition generated from it.
    /// @param rThisVariable   variable to copy (any type usable with GetValue/SetValue)
    /// @param pModelPartPost  destination (post) model_part
    template<class TVariableType>
    void TransferConditionalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
    {
        ConditionsArrayType& pConditions = mpModelPart->Conditions();
        for(typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
            {
                pModelPartPost->GetCondition(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
            }
        }
    }

    /// Synchronize post model_part with the reference model_part: evaluate the nodal
    /// solution-step variable of the reference model_part at the stored local
    /// coordinates of each post node and write the interpolated value to that node.
    template<class TVariableType>
    void TransferNodalResults(
        const TVariableType& rThisVariable,
        const ModelPart::Pointer pModelPartPost
    )
    {
        #ifdef
ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        #endif

        NodesArrayType& pTargetNodes = pModelPartPost->Nodes();
        ElementsArrayType& pElements = mpModelPart->Elements();

        typename TVariableType::Type Results;
        CoordinatesArrayType LocalPos;
        IndexType ElementId;

        // #pragma omp parallel for
        //TODO: check this. This is not parallelized.
        for(NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
        {
            IndexType key = (*it)->Id();
            // only post nodes that were registered during mesh generation carry
            // a (local coordinates, element) pair to interpolate from
            if(mNodeToElement.find(key) != mNodeToElement.end())
            {
                ElementId = mNodeToElement[key];
                if( ! pElements(ElementId)->GetValue(IS_INACTIVE) ) // skip the inactive elements
                {
                    noalias(LocalPos) = mNodeToLocalCoordinates[key];
                    Results = CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
                    (*it)->GetSolutionStepValue(rThisVariable) = Results;
                }
            }
        }

        #ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
        #endif
    }

    /// Synchronize post model_part with the reference model_part.
    /// Two steps: (1) project rThisVariable from the integration points of the
    /// reference model_part onto its nodes (L2 projection, needs pSolver),
    /// (2) interpolate those nodal values to the nodes of the post model_part.
    template<class TVariableType>
    void TransferIntegrationPointResults(
        const TVariableType& rThisVariable,
        const ModelPart::Pointer pModelPartPost,
        LinearSolverType::Pointer pSolver
    )
    {
        #ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results for " << rThisVariable.Name() << " starts" << std::endl;
        #endif

        // firstly transfer rThisVariable from integration points of reference model_part to its nodes
        TransferVariablesToNodes(pSolver, mpModelPart, rThisVariable);

        // secondly transfer new nodal variables results to the post model_part
        TransferNodalResults(rThisVariable, pModelPartPost);

        #ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
        #endif
    }

    /// Transfer the variable from integration points to the nodes of pModelPart
    /// itself (thin wrapper around the private L2-projection overloads; the solver
    /// is used for the projection system).
    template<class TVariableType>
    void TransferVariablesToNodes(
        const TVariableType& rThisVariable,
        ModelPart::Pointer pModelPart,
        LinearSolverType::Pointer pSolver
    )
    {
        #ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl;
        #endif

        TransferVariablesToNodes(pSolver, pModelPart, rThisVariable);

        #ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
        #endif
    }

    /**
     * Utility function to renumber the nodes of the post model_part (for parallel merge).
     * Only active when compiled with MPI support; each rank offsets its node ids by the
     * total node count of all lower ranks and tags its nodes with PARTITION_INDEX.
     */
    void GlobalNodalRenumbering(ModelPart::Pointer pModelPartPost)
    {
        #ifdef ISOGEOMETRIC_USE_MPI
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // gather the number of nodes on each process
        int NumberOfNodes[size];
        int MyNumberOfNodes = pModelPartPost->NumberOfNodes();
        MPI_Allgather(&MyNumberOfNodes, 1, MPI_INT, NumberOfNodes, 1, MPI_INT, MPI_COMM_WORLD);
        // std::cout << "NumberOfNodes:";
        // for(int i = 0; i < size; ++i)
        // std::cout << " " << NumberOfNodes[i];
        // std::cout << std::endl;

        // compute the numbering offset
        int offset = 0;
        for(int i = 0; i < rank; ++i)
            offset += NumberOfNodes[i];

        // renumber the nodes of the current process
        for(ModelPart::NodeIterator it = pModelPartPost->NodesBegin(); it != pModelPartPost->NodesEnd(); ++it)
        {
            it->SetId(++offset);
            it->GetSolutionStepValue(PARTITION_INDEX) = rank;
        }

        if(rank == 0)
            std::cout << "Global renumbering completed" << std::endl;
        #endif
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "BezierClassicalPostUtility";
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "BezierClassicalPostUtility";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ModelPart::Pointer mpModelPart; // pointer variable to the reference (isogeometric) model_part
    VectorMap<IndexType, CoordinatesArrayType> mNodeToLocalCoordinates; // map: post node id -> local coordinates of the node on its NURBS entity
    VectorMap<IndexType, IndexType> mNodeToElement; // map: post node id -> id of the element the node was generated on
    std::map<IndexType, std::set<IndexType> > mOldToNewElements; // map: old element id -> ids of the generated post elements
    std::map<IndexType, std::set<IndexType> > mOldToNewConditions; // map: old condition id -> ids of the generated post conditions

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * Calculate global coordinates w.r.t initial configuration
     * (sum of shape function values times nodal initial positions).
     */
    CoordinatesArrayType& GlobalCoordinates(
        GeometryType& rGeometry,
        CoordinatesArrayType& rResult,
        CoordinatesArrayType const& LocalCoordinates
    )
    {
        noalias( rResult ) = ZeroVector( 3 );

        Vector ShapeFunctionsValues;
        rGeometry.ShapeFunctionsValues(ShapeFunctionsValues, LocalCoordinates);

        for (
IndexType i = 0 ; i < rGeometry.size() ; ++i )
        {
            noalias( rResult ) += ShapeFunctionsValues( i ) * rGeometry.GetPoint( i ).GetInitialPosition();
        }

        return rResult;
    }

    /**
     * Interpolation on element: evaluate a scalar nodal solution-step variable
     * at the given local coordinates using the element's shape functions.
     */
    double CalculateOnPoint(
        const Variable<double>& rVariable,
        double& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        rResult = 0.0;
        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            double NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
            rResult += N( i ) * NodalValues;
        }

        return rResult;
    }

    /**
     * Interpolation on element: evaluate a Vector-valued nodal solution-step
     * variable at the given local coordinates. The first contribution assigns
     * rResult (sizing it); subsequent contributions accumulate with noalias.
     */
    Vector& CalculateOnPoint(
        const Variable<Vector>& rVariable,
        Vector& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);

            if(i == 0)
            {
                rResult = N( i ) * NodalValues;
            }
            else
            {
                noalias(rResult) += N( i ) * NodalValues;
            }
        }

        return rResult;
    }

    /**
     * Interpolation on element: evaluate an array_1d<double, 3> nodal
     * solution-step variable at the given local coordinates.
     */
    array_1d<double, 3>& CalculateOnPoint(
        const Variable<array_1d<double, 3> >& rVariable,
        array_1d<double, 3>& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        rResult[0] = 0.0;
        rResult[1] = 0.0;
        rResult[2] = 0.0;
        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            array_1d<double, 3> NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
            rResult += N( i ) * NodalValues;
        }

        return rResult;
    }

    /**
     * Transfer variable at integration points to nodes
     *
     * @param pSolver       the solver used for solving the local system matrix
     * @param pModelPart    pointer to model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable
the variable need to transfer the respected values */
    void TransferVariablesToNodes(
        LinearSolverType::Pointer& pSolver,
        ModelPart::Pointer& pModelPart,
        const Variable<double>& rThisVariable
    )
    {
        ElementsArrayType& ElementsArray= pModelPart->Elements();

        //Initialize system of equations
        // NOTE(review): assumes node ids are contiguous 1..NumberOfNodes (row index = Id()-1) — confirm for this model_part
        int NumberOfNodes = pModelPart->NumberOfNodes();
        SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
        noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);
        SerialSparseSpaceType::VectorType g(NumberOfNodes);
        noalias(g)= ZeroVector(NumberOfNodes);
        SerialSparseSpaceType::VectorType b(NumberOfNodes);
        noalias(b)= ZeroVector(NumberOfNodes);

        // create the structure for M a priori
        ConstructL2MatrixStructure<Element>(M, ElementsArray);

        // Transfer of GaussianVariables to Nodal Variables via L_2-Minimization
        // see Jiao + Heath "Common-refinement-based data transfer ..."
        // International Journal for numerical methods in engineering 61 (2004) 2402--2427
        // for general description of L_2-Minimization

        // set up the system of equations
        //create a partition of the element array
        int number_of_threads = omp_get_max_threads();
        std::vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads )
        std::cout << "element_partition:";
        for (std::size_t i = 0; i < element_partition.size(); ++i)
            std::cout << " " << element_partition[i];
        std::cout << std::endl;

        //create the array of lock (one lock per matrix row, i.e. per node)
        std::vector< omp_lock_t > lock_array(NumberOfNodes);
        for(unsigned int i = 0; i < NumberOfNodes; ++i)
            omp_init_lock(&lock_array[i]);

        #pragma omp parallel for
        for(int k = 0; k < number_of_threads; ++k)
        {
            Matrix InvJ(3, 3);
            double DetJ;
            unsigned int row, col;

            typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];

            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                if(!(*it)->GetValue(IS_INACTIVE))
                {
                    const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

                    GeometryType::JacobiansType J(integration_points.size());
                    // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                    // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

                    IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
                    J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());

                    GeometryType::ShapeFunctionsGradientsType DN_De;
                    Matrix Ncontainer;
                    rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
                        Ncontainer,
                        DN_De,
                        (*it)->GetIntegrationMethod()
                    );

                    // get the values at the integration_points
                    std::vector<double> ValuesOnIntPoint(integration_points.size());
                    (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());

                    for(unsigned int point = 0; point< integration_points.size(); ++point)
                    {
                        MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);

                        double dV = DetJ * integration_points[point].Weight();
                        for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
                        {
                            row = (*it)->GetGeometry()[prim].Id()-1;
                            // the row lock serializes concurrent updates of row 'row' of M and b
                            omp_set_lock(&lock_array[row]);
                            b(row) += (ValuesOnIntPoint[point]) * Ncontainer(point, prim) * dV;
                            for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
                            {
                                col = (*it)->GetGeometry()[sec].Id()-1;
                                M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
                            }
                            omp_unset_lock(&lock_array[row]);
                        }
                    }
                }
                else
                {
                    // for inactive elements the contribution to LHS is identity matrix and RHS is zero
                    for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
                    {
                        row = (*it)->GetGeometry()[prim].Id()-1;
                        omp_set_lock(&lock_array[row]);
                        // b(row) += 0.0;
                        for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
                        {
                            col = (*it)->GetGeometry()[sec].Id()-1;
                            if(col == row)
                                M(row, col) += 1.0;
                            // else
                            // M(row, col) += 0.0;
                        }
                        omp_unset_lock(&lock_array[row]);
                    }
                }
            }
        }

        for(unsigned int i = 0; i < NumberOfNodes; ++i)
            omp_destroy_lock(&lock_array[i]);

        // solve the system M*g = b for the nodal values g
        pSolver->Solve(M, g, b);

        // transfer the solution to the nodal variables
        for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
        {
            it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1));
        }
    }

    /**
     * Transfer of rThisVariable defined on integration points to corresponding
     * nodal values. The transformation is done in a form that ensures a minimization
     * of L_2-norm error (/sum{rThisVariable- f(x)) whereas
     * f(x)= /sum{shape_func_i*rThisVariable_i}
     * @param model_part model_part on which the transfer should be done
     * @param rThisVariable Matrix-Variable which should be transferred
     * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
     * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
*
     * @param pSolver       the solver used for solving the local system matrix
     * @param pModelPart    pointer to model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable the variable need to transfer the respected values
     */
    void TransferVariablesToNodes(
        LinearSolverType::Pointer& pSolver,
        ModelPart::Pointer& pModelPart,
        const Variable<Vector>& rThisVariable
    )
    {
        ElementsArrayType& ElementsArray = pModelPart->Elements();

        // the number of components depends on the variable; currently only the
        // symmetric-tensor-in-Voigt-notation variables below are supported
        const unsigned int& Dim = (*(ElementsArray.ptr_begin()))->GetGeometry().WorkingSpaceDimension();
        unsigned int VariableSize;
        if(rThisVariable.Name() == std::string("STRESSES")
            || rThisVariable.Name() == std::string("PLASTIC_STRAIN_VECTOR")
            || rThisVariable.Name() == std::string("PRESTRESS")
            || rThisVariable.Name() == std::string("STRAIN")
            // TODO: extend for more variables
        )
        {
            VariableSize = Dim * (Dim + 1) / 2;
        }
        else
            KRATOS_THROW_ERROR(std::logic_error, rThisVariable.Name(), " is not a supported variable for TransferVariablesToNodes routine.")

        #ifdef ENABLE_PROFILING
        //profiling variables
        double start_compute, end_compute;
        start_compute = OpenMPUtils::GetCurrentTime();
        #endif

        //Initialize system of equations
        unsigned int NumberOfNodes = pModelPart->NumberOfNodes();
        SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
        noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);

        // create the structure for M a priori
        ConstructL2MatrixStructure<Element>(M, ElementsArray);

        #ifdef ENABLE_PROFILING
        end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "ConstructMatrixStructure completed: " << end_compute - start_compute << " s" << std::endl;
        start_compute = end_compute;
        #endif

        // one right-hand side / solution column per variable component
        SerialDenseSpaceType::MatrixType g(NumberOfNodes, VariableSize);
        noalias(g)= ZeroMatrix(NumberOfNodes, VariableSize);
        SerialDenseSpaceType::MatrixType b(NumberOfNodes, VariableSize);
        noalias(b)= ZeroMatrix(NumberOfNodes, VariableSize);

        // one lock per matrix row (node) to serialize concurrent assembly
        std::vector< omp_lock_t > lock_array(NumberOfNodes);
        for(unsigned int i = 0; i < NumberOfNodes; ++i)
            omp_init_lock(&lock_array[i]);

        //create a partition of the element array
        int number_of_threads = omp_get_max_threads();
        std::vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads )
        std::cout << "element_partition:";
        for (std::size_t i = 0; i < element_partition.size(); ++i)
            std::cout << " " << element_partition[i];
        std::cout << std::endl;

        #pragma omp parallel for
        for(int k = 0; k < number_of_threads; ++k)
        {
            Matrix InvJ(Dim, Dim);
            double DetJ;
            unsigned int row, col;

            typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];

            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                if(!(*it)->GetValue(IS_INACTIVE))
                {
                    const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

                    GeometryType::JacobiansType J(integration_points.size());
                    // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                    // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

                    IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
                    J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());

                    GeometryType::ShapeFunctionsGradientsType DN_De;
                    Matrix Ncontainer;
                    rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
                        Ncontainer,
                        DN_De,
                        (*it)->GetIntegrationMethod()
                    );

                    // get the values at the integration_points
                    std::vector<Vector> ValuesOnIntPoint(integration_points.size());
                    (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());

                    for(unsigned int point = 0; point < integration_points.size(); ++point)
                    {
                        MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);

                        double dV = DetJ * integration_points[point].Weight();
                        for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
                        {
                            row = (*it)->GetGeometry()[prim].Id() - 1;
                            omp_set_lock(&lock_array[row]);
                            for(unsigned int i = 0; i < VariableSize; ++i)
                                b(row, i) += ValuesOnIntPoint[point][i] * Ncontainer(point, prim) * dV;
                            for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
                            {
                                col = (*it)->GetGeometry()[sec].Id() - 1;
                                M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
                            }
                            omp_unset_lock(&lock_array[row]);
                        }
                    }
                }
                else
                {
                    // for inactive elements the contribution to LHS is identity matrix and RHS is zero
                    for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
                    {
                        row = (*it)->GetGeometry()[prim].Id() - 1;
                        omp_set_lock(&lock_array[row]);
                        // for(unsigned int i = 0; i < VariableSize; ++i)
                        // b(row, i) += 0.0;
                        for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
                        {
                            col = (*it)->GetGeometry()[sec].Id() - 1;
                            if(col == row)
                                M(row, col) += 1.0;
                            // else
                            // M(row, col) += 0.0;
                        }
                        omp_unset_lock(&lock_array[row]);
                    }
                }
            }
        }

        for(unsigned int i = 0; i < NumberOfNodes; ++i)
            omp_destroy_lock(&lock_array[i]);

        #ifdef ENABLE_PROFILING
        end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Assemble the matrix completed: " << end_compute - start_compute << " s" << std::endl;
        start_compute = end_compute;
        #endif

        #ifdef DEBUG_MULTISOLVE
        KRATOS_WATCH(M)
        KRATOS_WATCH(b)
        KRATOS_WATCH(*pSolver)
        #endif

        // solve the system
        // solver must support the multisolve method
        pSolver->Solve(M, g, b);

        #ifdef DEBUG_MULTISOLVE
        KRATOS_WATCH(g)
        #endif

        // transfer the solution to the nodal variables
        for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
        {
            Vector tmp(VariableSize);
            for(unsigned int i = 0; i < VariableSize; ++i)
            {
                tmp(i) = g((it->Id()-1), i);
            }
            it->GetSolutionStepValue(rThisVariable) = tmp;
        }
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible
methods
    ///@{

    /// Assignment operator (intentionally a no-op: the utility is not copyable/assignable in a meaningful way).
    BezierClassicalPostUtility& operator=(BezierClassicalPostUtility const& rOther)
    {
        return *this;
    }

    /// Copy constructor (intentionally empty; leaves members default-initialized).
    BezierClassicalPostUtility(BezierClassicalPostUtility const& rOther)
    {
    }

    ///@}

}; // Class BezierClassicalPostUtility

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function (no state is read; provided for interface symmetry)
inline std::istream& operator >>(std::istream& rIStream, BezierClassicalPostUtility& rThis)
{
    return rIStream;
}

/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream, const BezierClassicalPostUtility& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}

///@} addtogroup block

}// namespace Kratos.

// clean up file-local debug/profiling macros so they do not leak to includers
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING

#endif
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED ) #define KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" #ifdef ISOGEOMETRIC_USE_MPI #include "mpi.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "includes/ublas_interface.h" #include "includes/deprecated_variables.h" #include "includes/legacy_structural_app_vars.h" #include "spaces/ublas_space.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" #include "utilities/auto_collapse_spatial_binning.h" #include "custom_utilities/iga_define.h" #include "custom_geometries/isogeometric_geometry.h" #include "custom_utilities/isogeometric_utility.h" #include "custom_utilities/isogeometric_post_utility.h" #include "isogeometric_application/isogeometric_application.h" //#define DEBUG_LEVEL1 //#define DEBUG_LEVEL2 //#define DEBUG_MULTISOLVE //#define DEBUG_GENERATE_MESH #define ENABLE_PROFILING namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ template<class T> void AddToModelPart(ModelPart& rModelPart, typename T::Pointer pE); template<> void AddToModelPart<Element>(ModelPart& rModelPart, typename Element::Pointer pE) { rModelPart.AddElement(pE); } template<> void AddToModelPart<Condition>(ModelPart& rModelPart, typename Condition::Pointer pC) { rModelPart.AddCondition(pC); } ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** A simple utility to export directly the FEM mesh out from isogeometric Bezier mesh. 
Each Bezier element will generate its own set of FEM elements. Therefore a large
amount of nodes and elements may be generated. One shall carefully use this
utility for large problems. Previously, this class was named
IsogeometricClassicalPostUtility.
*/
class BezierClassicalPostUtility : public IsogeometricPostUtility
{
public:
    ///@name Type Definitions
    ///@{

    // Dense ublas vector/matrix of doubles, used to carry values sampled at
    // integration points.
    typedef boost::numeric::ublas::vector<double> ValuesContainerType;

    typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;

    // Entity containers of the model part being post-processed.
    typedef typename ModelPart::NodesContainerType NodesArrayType;

    typedef typename ModelPart::ElementsContainerType ElementsArrayType;

    typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;

    // Geometry-related shorthands.
    typedef typename Element::GeometryType GeometryType;

    typedef typename GeometryType::PointType NodeType;

    typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;

    typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;

    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;

    typedef typename NodeType::DofsContainerType DofsContainerType;

    // Linear algebra spaces used when solving the (mass-matrix) transfer system.
    typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;

    typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;

    typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;

    typedef std::size_t IndexType;

    /// Pointer definition of BezierClassicalPostUtility
    KRATOS_CLASS_POINTER_DEFINITION(BezierClassicalPostUtility);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    /// @param pModelPart  the isogeometric (Bezier) model part to be post-processed
    BezierClassicalPostUtility(ModelPart::Pointer pModelPart)
    : mpModelPart(pModelPart)
    {
    }

    /// Destructor.
virtual ~BezierClassicalPostUtility() { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Generate the post model_part from reference model_part /// Deprecated void GenerateModelPart(ModelPart::Pointer pModelPartPost, PostElementType postElementType) { #ifdef ENABLE_PROFILING double start_compute = OpenMPUtils::GetCurrentTime(); #endif #ifdef DEBUG_LEVEL1 std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl; #endif ElementsArrayType& pElements = mpModelPart->Elements(); #ifdef DEBUG_LEVEL1 std::cout << "Retrieved pElements" << std::endl; #endif std::string NodeKey = std::string("Node"); //select the correct post element type std::string element_name; if(postElementType == _TRIANGLE_) element_name = std::string("KinematicLinear2D3N"); else if(postElementType == _QUADRILATERAL_) element_name = std::string("KinematicLinear2D4N"); else if(postElementType == _TETRAHEDRA_) element_name = std::string("KinematicLinear3D4N"); else if(postElementType == _HEXAHEDRA_) element_name = std::string("KinematicLinear3D8N"); else KRATOS_THROW_ERROR(std::logic_error, "This element type is not supported for isogeometric post-processing", __FUNCTION__); if(!KratosComponents<Element>::Has(element_name)) { std::stringstream buffer; buffer << "Element " << element_name << " is not registered in Kratos."; buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Element const& rCloneElement = KratosComponents<Element>::Get(element_name); IndexType NodeCounter = 0; IndexType ElementCounter = 0; boost::progress_display show_progress( pElements.size() ); for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl; continue; } int Dim = 
(*it)->GetGeometry().WorkingSpaceDimension(); IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(Dim) #endif //get the properties Properties::Pointer pDummyProperties = (*it)->pGetProperties(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(*pDummyProperties) #endif // generate list of nodes if(Dim == 1) { // TODO } else if(Dim == 2) { IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." << std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); #ifdef DEBUG_GENERATE_MESH // if(NodeCounter == 585 || NodeCounter == 588 || NodeCounter == 589) if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(pModelPartPost->GetBufferSize()); pModelPartPost->AddNode(pNewNode); mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = (*it)->Id(); } } //for correct mapping to element, the repetitive node is allowed. // pModelPartPost->Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Nodes().size()) std::cout << "Generating Elements..." 
<< std::endl; #endif // create and add element // Element::NodesArrayType temp_element_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // if(postElementType == _TRIANGLE_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // Element::Pointer NewElement1 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement1); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // Element::Pointer NewElement2 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement2); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // else if(postElementType == _QUADRILATERAL_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, 
NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; if(postElementType == _TRIANGLE_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4}); connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3}); } else if(postElementType == _QUADRILATERAL_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3}); } } } ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>( connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey); for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2) { pModelPartPost->AddElement(*it2); mOldToNewElements[(*it)->Id()].insert((*it2)->Id()); } pModelPartPost->Elements().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Elements().size()) #endif } else if(Dim == 3) { IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH((*it)->Id()) 
KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." << std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(pModelPartPost->GetBufferSize()); pModelPartPost->AddNode(pNewNode); mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = (*it)->Id(); } } } //for correct mapping to element, the repetitive node is allowed. // pModelPartPost->Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Nodes().size()) std::cout << "Generating Elements..." 
<< std::endl; #endif // create and add element // Element::NodesArrayType temp_element_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // if(postElementType == _TETRAHEDRA_) // { // // TODO: check if jacobian checking is necessary // } // else if(postElementType == _HEXAHEDRA_) // { // // TODO: check if jacobian checking is necessary // temp_element_nodes.clear(); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node5, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node6, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node8, NodeKey).base())); // temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node7, NodeKey).base())); // Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties); // pModelPartPost->AddElement(NewElement); // mOldToNewElements[(*it)->Id()].insert(ElementCounter); // } // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < 
NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { for(k = 0; k < NumDivision3; ++k) { IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node5 = Node1 + 1; IndexType Node6 = Node2 + 1; IndexType Node7 = Node3 + 1; IndexType Node8 = Node4 + 1; if(postElementType == _TETRAHEDRA_) { // TODO: check if creating Tetrahedra is correct connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4}); connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3}); } else if(postElementType == _HEXAHEDRA_) { connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7}); } } } } ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>( connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey); for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2) { pModelPartPost->AddElement(*it2); mOldToNewElements[(*it)->Id()].insert((*it2)->Id()); } pModelPartPost->Elements().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(pModelPartPost->Elements().size()) #endif } ++show_progress; } #ifdef ENABLE_PROFILING double end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "GeneratePostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl; #else std::cout << "GeneratePostModelPart completed" << std::endl; #endif std::cout << NodeCounter << " nodes and " << ElementCounter << " elements are created" << std::endl; } /// Generate the post model_part from reference model_part /// this is the 
improved version of GenerateModelPart
    /// which uses template function to generate post Elements for both Element and Condition
    void GenerateModelPart2(ModelPart::Pointer pModelPartPost, const bool& generate_for_condition)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif

        ElementsArrayType& pElements = mpModelPart->Elements();
        ConditionsArrayType& pConditions = mpModelPart->Conditions();

        std::string NodeKey = std::string("Node");

        IndexType NodeCounter = 0;      // running id of generated post nodes
        IndexType ElementCounter = 0;   // running id of generated post elements
        boost::progress_display show_progress( pElements.size() );
        std::vector<std::size_t> dummy_ids;
        for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            // NOTE: unlike GenerateModelPart, IS_INACTIVE elements are deliberately
            // NOT skipped here (the legacy skipping code was marked wrong and removed).

            if((*it)->pGetGeometry() == 0)
                KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at element", (*it)->Id())

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
            int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
            IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(Dim)
            KRATOS_WATCH(ReducedDim)
#endif

            // select the correct post element type
            std::string element_name;
            if((Dim == 2) && (ReducedDim == 2))
            {
                element_name = std::string("KinematicLinear2D4N");
            }
            else if((Dim == 3) && (ReducedDim == 2))
            {
                element_name = std::string("KinematicLinear2D4N");
            }
            else if((Dim == 3) && (ReducedDim == 3))
            {
                element_name = std::string("KinematicLinear3D8N");
            }
            else
            {
                std::stringstream ss;
                ss << "Invalid dimension of ";
                ss << typeid(*(*it)).name();
                ss << ", Dim = " << Dim;
                ss << ", ReducedDim = " << ReducedDim;
                KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
            }

            if(!KratosComponents<Element>::Has(element_name))
            {
                std::stringstream buffer;
                // NOTE(review): message text kept verbatim (runtime string), typos included.
                buffer << "Element " << element_name << " is not registered in Kratos.";
                buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
                KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
            }

            Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

            // delegate node + element generation for this Bezier element
            GenerateForOneEntity<Element, ElementsArrayType, 1>(*pModelPartPost, *(*it), rCloneElement, NodeCounter_old, NodeCounter, ElementCounter, NodeKey, false, dummy_ids, dummy_ids, false);

            ++show_progress;
        }
        KRATOS_WATCH(ElementCounter)

#ifdef DEBUG_LEVEL1
        std::cout << "Done generating for elements" << std::endl;
#endif

        IndexType ConditionCounter = 0;
        if (generate_for_condition)
        {
            boost::progress_display show_progress2( pConditions.size() );
            for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
            {
                // NOTE: IS_INACTIVE conditions are deliberately NOT skipped here
                // (the legacy skipping code was marked wrong and removed).

                if((*it)->pGetGeometry() == 0)
                    KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at condition", (*it)->Id())

                int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
                int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
                IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
                KRATOS_WATCH(Dim)
                KRATOS_WATCH(ReducedDim)
#endif

                // select the correct post condition type
                std::string condition_name;
                if(Dim == 3 && ReducedDim == 1)
                    condition_name = std::string("LineForce3D2N");
                else if(Dim == 3 && ReducedDim == 2)
                    condition_name = std::string("FaceForce3D4N");
                else
                {
                    // unsupported combination: skip this condition instead of throwing
                    std::stringstream ss;
                    ss << "Invalid dimension of ";
                    ss << typeid(*(*it)).name();
                    ss << ", Dim = " << Dim;
                    ss << ", ReducedDim = " << ReducedDim;
                    ss << ". Condition " << (*it)->Id() << " will be skipped.";
//                    KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
                    continue;
                }

                if(!KratosComponents<Condition>::Has(condition_name))
                {
                    std::stringstream buffer;
                    // NOTE(review): message text kept verbatim (runtime string), typos included.
                    buffer << "Condition " << condition_name << " is not registered in Kratos.";
                    buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
                    KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
                }

                Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

                // delegate node + condition generation for this Bezier condition
                GenerateForOneEntity<Condition, ConditionsArrayType, 2>(*pModelPartPost, *(*it), rCloneCondition, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey, false, dummy_ids, dummy_ids, false);

                ++show_progress2;
            }
            KRATOS_WATCH(ConditionCounter)
        }

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "GeneratePostModelPart2 completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
        std::cout << "GeneratePostModelPart2 completed" << std::endl;
#endif
        std::cout << NodeCounter << " nodes and " << ElementCounter << " elements";
        if (generate_for_condition)
            std::cout << ", " << ConditionCounter << " conditions";
        std::cout << " are created" << std::endl;
    }

    // Generate the post model_part from reference model_part
    // this is the improved version of GenerateModelPart
    // which uses template function to generate post Elements for both Element and Condition
    // this version used a collapsing utility to collapse nodes automatically
    void GenerateModelPart2AutoCollapse(ModelPart::Pointer pModelPartPost,
        double dx, double dy, double dz, double tol)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() <<
"::GenerateModelPart" << std::endl; #endif AutoCollapseSpatialBinning collapse_util(0.0, 0.0, 0.0, dx, dy, dz, tol); ElementsArrayType& pElements = mpModelPart->Elements(); ConditionsArrayType& pConditions = mpModelPart->Conditions(); std::string NodeKey = std::string("Node"); IndexType NodeCounter = 0; IndexType ElementCounter = 0; boost::progress_display show_progress( pElements.size() ); VectorMap<IndexType, IndexType> MapToCollapseNode; for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl; ++show_progress; continue; } int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(Dim) KRATOS_WATCH(ReducedDim) #endif //select the correct post element type std::string element_name; if(Dim == 2 && ReducedDim == 2) element_name = std::string("KinematicLinear2D4N"); else if(Dim == 3 && ReducedDim == 3) element_name = std::string("KinematicLinear3D8N"); else { std::stringstream ss; ss << "Invalid dimension of "; ss << typeid(*(*it)).name(); ss << ", Dim = " << Dim; ss << ", ReducedDim = " << ReducedDim; KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__); } if(!KratosComponents<Element>::Has(element_name)) { std::stringstream buffer; buffer << "Element " << element_name << " is not registered in Kratos."; buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Element const& rCloneElement = KratosComponents<Element>::Get(element_name); GenerateForOneEntityAutoCollapse<Element, ElementsArrayType, 1>(collapse_util, *pModelPartPost, *(*it), rCloneElement, 
MapToCollapseNode, NodeCounter_old, NodeCounter, ElementCounter, NodeKey); ++show_progress; } #ifdef DEBUG_LEVEL1 std::cout << "Done generating for elements" << std::endl; #endif IndexType ConditionCounter = 0; boost::progress_display show_progress2( pConditions.size() ); for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it) { if((*it)->GetValue( IS_INACTIVE )) { // std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl; ++show_progress2; continue; } int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry IndexType NodeCounter_old = NodeCounter; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(typeid((*it)->GetGeometry()).name()) KRATOS_WATCH(Dim) KRATOS_WATCH(ReducedDim) #endif //select the correct post condition type std::string condition_name; if(Dim == 3 && ReducedDim == 1) condition_name = std::string("LineForce3D2N"); else if(Dim == 3 && ReducedDim == 2) condition_name = std::string("FaceForce3D4N"); else { std::stringstream ss; ss << "Invalid dimension of "; ss << typeid(*(*it)).name(); ss << ", Dim = " << Dim; ss << ", ReducedDim = " << ReducedDim; KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__); } if(!KratosComponents<Condition>::Has(condition_name)) { std::stringstream buffer; buffer << "Condition " << condition_name << " is not registered in Kratos."; buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly."; KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), ""); } Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name); GenerateForOneEntityAutoCollapse<Condition, ConditionsArrayType, 2>(collapse_util, *pModelPartPost, *(*it), rCloneCondition, MapToCollapseNode, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey); ++show_progress2; } 
#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Generate PostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
        std::cout << "Generate PostModelPart completed" << std::endl;
#endif
        std::cout << NodeCounter << " nodes and " << ElementCounter << " elements"
                  << ", " << ConditionCounter << " conditions are created" << std::endl;
    }

    /**
     * Utility function to generate elements/conditions for element/condition.
     * If TEntityType==Element, type must be 1; if T==Condition, type is 2.
     *
     * Subdivides the parameter space of rE into a regular grid (NUM_DIVISION_1 x
     * NUM_DIVISION_2 [x NUM_DIVISION_3]) and creates linear FE nodes/entities on it.
     * NodeCounter/EntityCounter are running global counters shared across calls;
     * NodeCounter_old is the counter value before this entity's nodes were added
     * and anchors the connectivity numbering below.
     */
    template<class TEntityType, class TEntityContainerType, std::size_t type>
    void GenerateForOneEntity(ModelPart& rModelPart,
        TEntityType& rE,
        TEntityType const& rSample,
        IndexType NodeCounter_old,
        IndexType& NodeCounter,
        IndexType& EntityCounter,
        const std::string& NodeKey,
        const bool& transfer_nodal_var,
        std::vector<std::size_t>& node_ids,
        std::vector<std::size_t>& element_ids,
        const bool& get_indices)
    {
        // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension();
        int ReducedDim = rE.GetGeometry().Dimension();

        // get the properties
        Properties::Pointer pDummyProperties = rE.pGetProperties();

#ifdef DEBUG_LEVEL1
        std::cout << "Generating for " << rE.Info() << std::endl;
        KRATOS_WATCH(*pDummyProperties)
        KRATOS_WATCH(EntityCounter)
#endif

        // generate list of nodes
        if(ReducedDim == 1)
        {
            // TODO
        }
        else if(ReducedDim == 2)
        {
            IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
            IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
            IndexType i, j;
            CoordinatesArrayType p_ref;
            CoordinatesArrayType p;
            Vector shape_values;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(NumDivision1)
            KRATOS_WATCH(NumDivision2)
            std::cout << "Generating Nodes..." << std::endl;
#endif

            // create and add nodes on the (NumDivision1+1) x (NumDivision2+1) parametric grid
            p_ref[2] = 0.0;
            for(i = 0; i <= NumDivision1; ++i)
            {
                p_ref[0] = ((double) i) / NumDivision1;
                for(j = 0; j <= NumDivision2; ++j)
                {
                    p_ref[1] = ((double) j) / NumDivision2;
                    // map local coordinates to global (initial) coordinates on the NURBS geometry
                    p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
                    NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                    pNewNode->SetId(++NodeCounter);
                    // Giving model part's variables list to the node
                    pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
                    // set buffer size
                    pNewNode->SetBufferSize(rModelPart.GetBufferSize());
                    rModelPart.AddNode(pNewNode);
                    // only elements (type == 1) register the inverse node->(local coords, element) maps
                    if(type == 1)
                    {
                        mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                        mNodeToElement(pNewNode->Id()) = rE.Id();
                    }

                    if (transfer_nodal_var)
                    {
                        // interpolate nodal solution-step variables of rE at p_ref onto the new node
                        shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                        VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                        for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                        {
                            if (typeid(*it) == typeid(Variable<double>))
                            {
                                const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                                double value = 0.0;
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                            else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                            {
                                const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                                array_1d<double, 3> value;
                                noalias(value) = ZeroVector(3);
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                        }
                    }

                    if (get_indices)
                        node_ids.push_back(pNewNode->Id());
                }
            }

            //for correct mapping to element, the repetitive node is allowed.
            // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rModelPart.Nodes().size())
            if(type == 1)
                std::cout << "Generating Elements..." << std::endl;
            else
                std::cout << "Generating Conditions..." << std::endl;
#endif

            // create and add element
            // NOTE: legacy commented-out per-entity creation loop removed; superseded by
            // the connectivity list + IsogeometricPostUtility::CreateEntities call below.

            // build quad connectivities; node ordering {1,2,4,3} gives a counter-clockwise quad
            std::vector<std::vector<IndexType> > connectivities;
            for(i = 0; i < NumDivision1; ++i)
            {
                for(j = 0; j < NumDivision2; ++j)
                {
                    IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                    IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                    IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                    IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
                    connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
                }
            }

            TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
                connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                AddToModelPart<TEntityType>(rModelPart, *it2);
                // record old->new id mapping for later data/activation synchronization
                if(type == 1)
                    mOldToNewElements[rE.Id()].insert((*it2)->Id());
                else if(type == 2)
                    mOldToNewConditions[rE.Id()].insert((*it2)->Id());
            }

            if(type == 1)
                rModelPart.Elements().Unique();
            else if(type == 2)
                rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
            if(type == 1)
                KRATOS_WATCH(rModelPart.Elements().size())
            else
                KRATOS_WATCH(rModelPart.Conditions().size())
#endif

            if (get_indices)
            {
                for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
                {
                    element_ids.push_back((*it2)->Id());
                }
            }
        }
        else if(ReducedDim == 3)
        {
            IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
            IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
            IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
            IndexType i, j, k;
            CoordinatesArrayType p_ref;
            CoordinatesArrayType p;
            Vector shape_values;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rE.Id())
            KRATOS_WATCH(NumDivision1)
            KRATOS_WATCH(NumDivision2)
            KRATOS_WATCH(NumDivision3)
            std::cout << "Generating Nodes..." << std::endl;
#endif

            // create and add nodes on the 3D parametric grid
            for(i = 0; i <= NumDivision1; ++i)
            {
                p_ref[0] = ((double) i) / NumDivision1;
                for(j = 0; j <= NumDivision2; ++j)
                {
                    p_ref[1] = ((double) j) / NumDivision2;
                    for(k = 0; k <= NumDivision3; ++k)
                    {
                        p_ref[2] = ((double) k) / NumDivision3;
                        p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
                        NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                        pNewNode->SetId(++NodeCounter);

#ifdef DEBUG_GENERATE_MESH
                        if(NodeCounter)
                        {
                            std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                        }
#endif

                        // Giving model part's variables list to the node
                        pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
                        // set buffer size
                        pNewNode->SetBufferSize(rModelPart.GetBufferSize());
                        rModelPart.AddNode(pNewNode);
                        // only elements (type == 1) register the inverse node->(local coords, element) maps
                        if(type == 1)
                        {
                            mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                            mNodeToElement(pNewNode->Id()) = rE.Id();
                        }

                        if (transfer_nodal_var)
                        {
                            // interpolate nodal solution-step variables of rE at p_ref onto the new node
                            shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                            VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                            for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                            {
                                if (typeid(*it) == typeid(Variable<double>))
                                {
                                    const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                                    double value = 0.0;
                                    for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                        value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                    pNewNode->GetSolutionStepValue(my_variable) = value;
                                }
                                else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                                {
                                    const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                                    array_1d<double, 3> value;
                                    noalias(value) = ZeroVector(3);
                                    for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                        noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                    pNewNode->GetSolutionStepValue(my_variable) = value;
                                }
                            }
                        }

                        if (get_indices)
                            node_ids.push_back(pNewNode->Id());
                    }
                }
            }

            //for correct mapping to element, the repetitive node is allowed.
            // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rModelPart.Nodes().size())
            if(type == 1)
                std::cout << "Generating Elements..." << std::endl;
            else
                std::cout << "Generating Conditions..." << std::endl;
#endif

            // create and add element
            // NOTE: legacy commented-out per-entity creation loop removed; superseded by
            // the connectivity list + IsogeometricPostUtility::CreateEntities call below.

            // build hexahedron connectivities; ordering {1,2,4,3,5,6,8,7} gives a standard hexa
            std::vector<std::vector<IndexType> > connectivities;
            for(i = 0; i < NumDivision1; ++i)
            {
                for(j = 0; j < NumDivision2; ++j)
                {
                    for(k = 0; k < NumDivision3; ++k)
                    {
                        IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                        IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                        IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                        IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                        IndexType Node5 = Node1 + 1;
                        IndexType Node6 = Node2 + 1;
                        IndexType Node7 = Node3 + 1;
                        IndexType Node8 = Node4 + 1;
                        connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
                    }
                }
            }

            TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
                connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                AddToModelPart<TEntityType>(rModelPart, *it2);
                // record old->new id mapping for later data/activation synchronization
                if(type == 1)
                    mOldToNewElements[rE.Id()].insert((*it2)->Id());
                else if(type == 2)
                    mOldToNewConditions[rE.Id()].insert((*it2)->Id());
            }

            if(type == 1)
                rModelPart.Elements().Unique();
            else if(type == 2)
                rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
            if(type == 1)
                KRATOS_WATCH(rModelPart.Elements().size())
            else
                KRATOS_WATCH(rModelPart.Conditions().size())
#endif

            if (get_indices)
            {
                for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
                {
                    element_ids.push_back((*it2)->Id());
                }
            }
        }
    }

    /**
     * Utility function to generate elements/conditions for element/condition.
     * This uses a collapse utility to automatically merge the coincident nodes
     * if T==Element, type must be 1; otherwise type=2
     *
     * Unlike GenerateForOneEntity, nodes geometrically coincident with an already
     * created node are merged via collapse_util; rMapToCollapseNode maps the
     * running (uncollapsed) node counter to the collapsed node id actually used.
     */
    template<class TEntityType, class TEntityContainerType, std::size_t type>
    void GenerateForOneEntityAutoCollapse(AutoCollapseSpatialBinning& collapse_util,
        ModelPart& rModelPart,
        TEntityType& rE,
        TEntityType const& rSample,
        VectorMap<IndexType, IndexType>& rMapToCollapseNode,
        IndexType NodeCounter_old,
        IndexType& NodeCounter,
        IndexType& EntityCounter,
        const std::string& NodeKey)
    {
        // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension();
        int ReducedDim = rE.GetGeometry().Dimension();

        // get the properties
        Properties::Pointer pDummyProperties = rE.pGetProperties();

#ifdef DEBUG_LEVEL1
        if(type == 1)
            std::cout << "Generating for element " << rE.Id() << std::endl;
        else
            std::cout << "Generating for condition " << rE.Id() << std::endl;
        KRATOS_WATCH(*pDummyProperties)
#endif

        // generate list of nodes
        if(ReducedDim == 1)
        {
            // TODO
        }
        else if(ReducedDim == 2)
        {
            IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
            IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
            IndexType i, j;
            CoordinatesArrayType p_ref;
            CoordinatesArrayType p;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(NumDivision1)
            KRATOS_WATCH(NumDivision2)
            std::cout << "Generating Nodes..." << std::endl;
#endif

            // create and add nodes
            p_ref[2] = 0.0;
            for(i = 0; i <= NumDivision1; ++i)
            {
                p_ref[0] = ((double) i) / NumDivision1;
                for(j = 0; j <= NumDivision2; ++j)
                {
                    p_ref[1] = ((double) j) / NumDivision2;
                    p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
                    // collapse_util returns the id of a coincident existing node, or a fresh id
                    IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) );
                    ++NodeCounter;
                    rMapToCollapseNode[NodeCounter] = id;
                    if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end())
                    {
                        // this is a new node
                        NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                        pNewNode->SetId(id);
                        // Giving model part's variables list to the node
                        pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
                        // set buffer size
                        pNewNode->SetBufferSize(rModelPart.GetBufferSize());
                        rModelPart.AddNode(pNewNode);
                    }
                    else
                    {
                        // this is an old node, not required to add to model_part
                        // so do nothing
                    }
                    // in this way, the node will always point to the last local coodinates and element
                    if(type == 1)
                    {
                        mNodeToLocalCoordinates(id) = p_ref;
                        mNodeToElement(id) = rE.Id();
                    }
                }
            }

            //for correct mapping to element, the repetitive node is allowed.
            // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rModelPart.Nodes().size())
            if(type == 1)
                std::cout << "Generating Elements..." << std::endl;
            else
                std::cout << "Generating Conditions..." << std::endl;
#endif

            // create and add element
            // NOTE: legacy commented-out per-entity creation loop removed; superseded by
            // the connectivity list + IsogeometricPostUtility::CreateEntities call below.

            // quad connectivities through the collapse map; ordering {1,2,4,3} as in GenerateForOneEntity
            std::vector<std::vector<IndexType> > connectivities;
            for(i = 0; i < NumDivision1; ++i)
            {
                for(j = 0; j < NumDivision2; ++j)
                {
                    IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                    IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                    IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                    IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
                    connectivities.push_back(std::vector<IndexType>{
                        rMapToCollapseNode[Node1], rMapToCollapseNode[Node2],
                        rMapToCollapseNode[Node4], rMapToCollapseNode[Node3]});
                }
            }

            TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
                connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                AddToModelPart<TEntityType>(rModelPart, *it2);
                // record old->new id mapping for later data/activation synchronization
                if(type == 1)
                    mOldToNewElements[rE.Id()].insert((*it2)->Id());
                else if(type == 2)
                    mOldToNewConditions[rE.Id()].insert((*it2)->Id());
            }

            if(type == 1)
                rModelPart.Elements().Unique();
            else if(type == 2)
                rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
            if(type == 1)
                KRATOS_WATCH(rModelPart.Elements().size())
            else
                KRATOS_WATCH(rModelPart.Conditions().size())
#endif
        }
        else if(ReducedDim == 3)
        {
            IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
            IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
            IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
            IndexType i, j, k;
            CoordinatesArrayType p_ref;
            CoordinatesArrayType p;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rE.Id())
            KRATOS_WATCH(NumDivision1)
            KRATOS_WATCH(NumDivision2)
            KRATOS_WATCH(NumDivision3)
            std::cout << "Generating Nodes..." << std::endl;
#endif

            // create and add nodes
            for(i = 0; i <= NumDivision1; ++i)
            {
                p_ref[0] = ((double) i) / NumDivision1;
                for(j = 0; j <= NumDivision2; ++j)
                {
                    p_ref[1] = ((double) j) / NumDivision2;
                    for(k = 0; k <= NumDivision3; ++k)
                    {
                        p_ref[2] = ((double) k) / NumDivision3;
                        p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
                        // collapse_util returns the id of a coincident existing node, or a fresh id
                        IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) );
                        ++NodeCounter;
                        rMapToCollapseNode[NodeCounter] = id;
                        if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end())
                        {
                            // this is a new node
                            NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                            pNewNode->SetId(id);

#ifdef DEBUG_GENERATE_MESH
                            if(NodeCounter)
                            {
                                std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                            }
#endif

                            // Giving model part's variables list to the node
                            pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
                            // set buffer size
                            pNewNode->SetBufferSize(rModelPart.GetBufferSize());
                            rModelPart.AddNode(pNewNode);
                        }
                        else
                        {
                            // this is an old node, not required to add to model_part
                            // so do nothing
                        }
                        // in this way, the node will always point to the last local coodinates and element
                        if(type == 1)
                        {
                            mNodeToLocalCoordinates(id) = p_ref;
                            mNodeToElement(id) = rE.Id();
                        }
                    }
                }
            }

            //for correct mapping to element, the repetitive node is allowed.
            // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(rModelPart.Nodes().size())
            if(type == 1)
                std::cout << "Generating Elements..." << std::endl;
            else
                std::cout << "Generating Conditions..." << std::endl;
#endif

            // create and add element
            // NOTE: legacy commented-out per-entity creation loop removed; superseded by
            // the connectivity list + IsogeometricPostUtility::CreateEntities call below.

            // hexa connectivities through the collapse map; ordering {1,2,4,3,5,6,8,7} as in GenerateForOneEntity
            std::vector<std::vector<IndexType> > connectivities;
            for(i = 0; i < NumDivision1; ++i)
            {
                for(j = 0; j < NumDivision2; ++j)
                {
                    for(k = 0; k < NumDivision3; ++k)
                    {
                        IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                        IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                        IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                        IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                        IndexType Node5 = Node1 + 1;
                        IndexType Node6 = Node2 + 1;
                        IndexType Node7 = Node3 + 1;
                        IndexType Node8 = Node4 + 1;
                        connectivities.push_back(std::vector<IndexType>{
                            rMapToCollapseNode[Node1], rMapToCollapseNode[Node2],
                            rMapToCollapseNode[Node4], rMapToCollapseNode[Node3],
                            rMapToCollapseNode[Node5], rMapToCollapseNode[Node6],
                            rMapToCollapseNode[Node8], rMapToCollapseNode[Node7]});
                    }
                }
            }

            TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
                connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                AddToModelPart<TEntityType>(rModelPart, *it2);
                // record old->new id mapping for later data/activation synchronization
                if(type == 1)
                    mOldToNewElements[rE.Id()].insert((*it2)->Id());
                else if(type == 2)
                    mOldToNewConditions[rE.Id()].insert((*it2)->Id());
            }

            if(type == 1)
                rModelPart.Elements().Unique();
            else if(type == 2)
                rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
            if(type == 1)
                KRATOS_WATCH(rModelPart.Elements().size())
            else
                KRATOS_WATCH(rModelPart.Conditions().size())
#endif
        }
    }

    // Synchronize the activation between model_parts
    // (copies IS_INACTIVE from each original element/condition to all post entities created from it)
    void SynchronizeActivation(ModelPart::Pointer pModelPartPost)
    {
        ElementsArrayType& pElements = mpModelPart->Elements();
        for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
            for(std::set<IndexType>::iterator it2
                = NewElements.begin(); it2 != NewElements.end(); ++it2)
            {
                pModelPartPost->GetElement(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
            }
        }
        ConditionsArrayType& pConditions = mpModelPart->Conditions();
        for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
            {
                pModelPartPost->GetCondition(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
            }
        }
    }

    // transfer the elemental data
    // (copies rThisVariable from each original element to all post elements created from it)
    template<class TVariableType>
    void TransferElementalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
    {
        ElementsArrayType& pElements = mpModelPart->Elements();
        for(typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
            {
                pModelPartPost->GetElement(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
            }
        }
    }

    // transfer the conditional data
    // (copies rThisVariable from each original condition to all post conditions created from it)
    template<class TVariableType>
    void TransferConditionalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
    {
        ConditionsArrayType& pConditions = mpModelPart->Conditions();
        for(typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
            for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
            {
                pModelPartPost->GetCondition(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
            }
        }
    }

    // Synchronize post model_part with the reference model_part
    // (for each post node, interpolate rThisVariable from the owning NURBS element
    //  at the node's stored local coordinates; inactive elements are skipped)
    template<class TVariableType>
    void TransferNodalResults(
        const TVariableType& rThisVariable,
        const ModelPart::Pointer pModelPartPost
    )
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif

        NodesArrayType& pTargetNodes = pModelPartPost->Nodes();

        ElementsArrayType& pElements = mpModelPart->Elements();

        typename TVariableType::Type Results;
        CoordinatesArrayType LocalPos;
        IndexType ElementId;

        // //TODO: check this. This is not parallelized.
        for(NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
        {
            IndexType key = (*it)->Id();
            if(mNodeToElement.find(key) != mNodeToElement.end())
            {
                ElementId = mNodeToElement[key];
                if( ! pElements(ElementId)->GetValue(IS_INACTIVE) ) // skip the inactive elements
                {
                    noalias(LocalPos) = mNodeToLocalCoordinates[key];
                    Results = CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
                    (*it)->GetSolutionStepValue(rThisVariable) = Results;
                }
            }
        }

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#endif
    }

    // Synchronize post model_part with the reference model_part
    // (integration-point values -> reference nodes via L2 projection, then -> post nodes)
    template<class TVariableType>
    void TransferIntegrationPointResults(
        const TVariableType& rThisVariable,
        const ModelPart::Pointer pModelPartPost,
        LinearSolverType::Pointer pSolver
    )
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results for " << rThisVariable.Name() << " starts" << std::endl;
#endif

        // firstly transfer rThisVariable from integration points of reference model_part to its nodes
        TransferVariablesToNodes(pSolver, mpModelPart, rThisVariable);

        // secondly transfer new nodal variables results to the post model_part
        TransferNodalResults(rThisVariable, pModelPartPost);

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results for " << rThisVariable.Name()
                  << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
#endif
    }

    // Transfer the variable to nodes for model_part
    // (public wrapper around the private L2-projection routine, with optional profiling)
    template<class TVariableType>
    void TransferVariablesToNodes(
        const TVariableType& rThisVariable,
        ModelPart::Pointer pModelPart,
        LinearSolverType::Pointer pSolver
    )
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl;
#endif

        TransferVariablesToNodes(pSolver, pModelPart, rThisVariable);

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name()
                  << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
#endif
    }

    /**
     * Utility function to renumber the nodes of the post model_part (for parallel merge)
     * Each rank offsets its node ids by the total node count of all lower ranks,
     * so that ids are globally unique after the merge. No-op without MPI.
     */
    void GlobalNodalRenumbering(ModelPart::Pointer pModelPartPost)
    {
#ifdef ISOGEOMETRIC_USE_MPI
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // gather the number of nodes on each process
        int NumberOfNodes[size];
        int MyNumberOfNodes = pModelPartPost->NumberOfNodes();
        MPI_Allgather(&MyNumberOfNodes, 1, MPI_INT, NumberOfNodes, 1, MPI_INT, MPI_COMM_WORLD);
        // std::cout << "NumberOfNodes:";
        // for(int i = 0; i < size; ++i)
        //     std::cout << " " << NumberOfNodes[i];
        // std::cout << std::endl;

        // compute the numbering offset
        int offset = 0;
        for(int i = 0; i < rank; ++i)
            offset += NumberOfNodes[i];

        // renumber the nodes of the current process
        for(ModelPart::NodeIterator it = pModelPartPost->NodesBegin(); it != pModelPartPost->NodesEnd(); ++it)
        {
            it->SetId(++offset);
            it->GetSolutionStepValue(PARTITION_INDEX) = rank;
        }

        if(rank == 0)
            std::cout << "Global renumbering completed" << std::endl;
#endif
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "BezierClassicalPostUtility";
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "BezierClassicalPostUtility";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ModelPart::Pointer mpModelPart; // pointer variable to a model_part
    VectorMap<IndexType, CoordinatesArrayType> mNodeToLocalCoordinates; // vector map to store local coordinates of node on a NURBS entity
    VectorMap<IndexType, IndexType> mNodeToElement; // vector map to store local coordinates of node on a NURBS entity
    std::map<IndexType, std::set<IndexType> > mOldToNewElements; // vector map to store id map from old element to new elements
    std::map<IndexType, std::set<IndexType> > mOldToNewConditions; // vector map to store id map from old condition to new conditions

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * Calculate global coodinates w.r.t initial configuration
     * (evaluates the geometry's shape functions at LocalCoordinates and
     *  sums the weighted initial nodal positions into rResult)
     */
    CoordinatesArrayType& GlobalCoordinates(
        GeometryType& rGeometry,
        CoordinatesArrayType& rResult,
        CoordinatesArrayType const& LocalCoordinates
    )
    {
        noalias( rResult ) = ZeroVector( 3 );

        Vector ShapeFunctionsValues;
        rGeometry.ShapeFunctionsValues(ShapeFunctionsValues, LocalCoordinates);

        for ( IndexType i = 0 ; i <
              rGeometry.size() ; ++i )
        {
            noalias( rResult ) += ShapeFunctionsValues( i ) * rGeometry.GetPoint( i ).GetInitialPosition();
        }

        return rResult;
    }

    /**
     * Interpolation on element
     * (scalar variable: rResult = sum_i N_i(rCoordinates) * nodal value_i)
     */
    double CalculateOnPoint(
        const Variable<double>& rVariable,
        double& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        rResult = 0.0;
        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            double NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
            rResult += N( i ) * NodalValues;
        }

        return rResult;
    }

    /**
     * Interpolation on element
     * (Vector variable; the first node's contribution also sizes rResult)
     */
    Vector& CalculateOnPoint(
        const Variable<Vector>& rVariable,
        Vector& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);

            if(i == 0)
            {
                // first contribution initializes (and sizes) the accumulator
                rResult = N( i ) * NodalValues;
            }
            else
            {
                noalias(rResult) += N( i ) * NodalValues;
            }
        }

        return rResult;
    }

    /**
     * Interpolation on element
     * (array_1d<double, 3> variable: component-wise shape-function interpolation)
     */
    array_1d<double, 3>& CalculateOnPoint(
        const Variable<array_1d<double, 3> >& rVariable,
        array_1d<double, 3>& rResult,
        Element::Pointer& pElement,
        const CoordinatesArrayType& rCoordinates
    )
    {
        Vector N;
        pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

        rResult[0] = 0.0;
        rResult[1] = 0.0;
        rResult[2] = 0.0;
        for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
        {
            array_1d<double, 3> NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
            rResult += N( i ) * NodalValues;
        }

        return rResult;
    }

    /**
     * Transfer variable at integration points to nodes
     *
     * @param pSolver the solver used for solving the local system matrix
     * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable the variable need to
transfer the respected values */ void TransferVariablesToNodes( LinearSolverType::Pointer& pSolver, ModelPart::Pointer& pModelPart, const Variable<double>& rThisVariable ) { ElementsArrayType& ElementsArray= pModelPart->Elements(); //Initialize system of equations int NumberOfNodes = pModelPart->NumberOfNodes(); SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes); noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes); SerialSparseSpaceType::VectorType g(NumberOfNodes); noalias(g)= ZeroVector(NumberOfNodes); SerialSparseSpaceType::VectorType b(NumberOfNodes); noalias(b)= ZeroVector(NumberOfNodes); // create the structure for M a priori ConstructL2MatrixStructure<Element>(M, ElementsArray); // Transfer of GaussianVariables to Nodal Variables via L_2-Minimization // see Jiao + Heath "Common-refinement-based data tranfer ..." // International Journal for numerical methods in engineering 61 (2004) 2402--2427 // for general description of L_2-Minimization // set up the system of equations //create a partition of the element array int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition); KRATOS_WATCH( number_of_threads ) std::cout << "element_partition:"; for (std::size_t i = 0; i < element_partition.size(); ++i) std::cout << " " << element_partition[i]; std::cout << std::endl; //create the array of lock std::vector< omp_lock_t > lock_array(NumberOfNodes); for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_init_lock(&lock_array[i]); for(int k = 0; k < number_of_threads; ++k) { Matrix InvJ(3, 3); double DetJ; unsigned int row, col; typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it ) { 
if(!(*it)->GetValue(IS_INACTIVE)) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry()); J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod()); GeometryType::ShapeFunctionsGradientsType DN_De; Matrix Ncontainer; rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients( Ncontainer, DN_De, (*it)->GetIntegrationMethod() ); // get the values at the integration_points std::vector<double> ValuesOnIntPoint(integration_points.size()); (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo()); for(unsigned int point = 0; point< integration_points.size(); ++point) { MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ); double dV = DetJ * integration_points[point].Weight(); for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id()-1; omp_set_lock(&lock_array[row]); b(row) += (ValuesOnIntPoint[point]) * Ncontainer(point, prim) * dV; for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id()-1; M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV; } omp_unset_lock(&lock_array[row]); } } } else { // for inactive elements the contribution to LHS is identity matrix and RHS is zero for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id()-1; omp_set_lock(&lock_array[row]); // b(row) += 0.0; for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id()-1; if(col == row) 
M(row, col) += 1.0; // else // M(row, col) += 0.0; } omp_unset_lock(&lock_array[row]); } } } } for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_destroy_lock(&lock_array[i]); // solver the system pSolver->Solve(M, g, b); // transfer the solution to the nodal variables for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it) { it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1)); } } /** * Transfer of rThisVariable defined on integration points to corresponding * nodal values. The transformation is done in a form that ensures a minimization * of L_2-norm error (/sum{rThisVariable- f(x)) whereas * f(x)= /sum{shape_func_i*rThisVariable_i} * @param model_part model_part on which the transfer should be done * @param rThisVariable Matrix-Variable which should be transferred * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable) * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int. * Journal for numer. meth. in eng. 61 (2004) 2402--2427 * WARNING: this may cause segmentation faults as the respective variables * will be created on nodal level while they are originally intended to be * stored on integration points! 
* * @param pSolver the solver used for solving the local system matrix * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes * @param rThisVariable the variable need to transfer the respected values */ void TransferVariablesToNodes( LinearSolverType::Pointer& pSolver, ModelPart::Pointer& pModelPart, const Variable<Vector>& rThisVariable ) { ElementsArrayType& ElementsArray = pModelPart->Elements(); const unsigned int& Dim = (*(ElementsArray.ptr_begin()))->GetGeometry().WorkingSpaceDimension(); unsigned int VariableSize; if(rThisVariable.Name() == std::string("STRESSES") || rThisVariable.Name() == std::string("PLASTIC_STRAIN_VECTOR") || rThisVariable.Name() == std::string("PRESTRESS") || rThisVariable.Name() == std::string("STRAIN") // TODO: extend for more variables ) { VariableSize = Dim * (Dim + 1) / 2; } else KRATOS_THROW_ERROR(std::logic_error, rThisVariable.Name(), " is not a supported variable for TransferVariablesToNodes routine.") #ifdef ENABLE_PROFILING //profiling variables double start_compute, end_compute; start_compute = OpenMPUtils::GetCurrentTime(); #endif //Initialize system of equations unsigned int NumberOfNodes = pModelPart->NumberOfNodes(); SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes); noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes); // create the structure for M a priori ConstructL2MatrixStructure<Element>(M, ElementsArray); #ifdef ENABLE_PROFILING end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "ConstructMatrixStructure completed: " << end_compute - start_compute << " s" << std::endl; start_compute = end_compute; #endif SerialDenseSpaceType::MatrixType g(NumberOfNodes, VariableSize); noalias(g)= ZeroMatrix(NumberOfNodes, VariableSize); SerialDenseSpaceType::MatrixType b(NumberOfNodes, VariableSize); noalias(b)= ZeroMatrix(NumberOfNodes, VariableSize); std::vector< omp_lock_t > lock_array(NumberOfNodes); for(unsigned int i = 0; i < 
NumberOfNodes; ++i) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition); KRATOS_WATCH( number_of_threads ) std::cout << "element_partition:"; for (std::size_t i = 0; i < element_partition.size(); ++i) std::cout << " " << element_partition[i]; std::cout << std::endl; for(int k = 0; k < number_of_threads; ++k) { Matrix InvJ(Dim, Dim); double DetJ; unsigned int row, col; typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it ) { if(!(*it)->GetValue(IS_INACTIVE)) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry()); J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod()); GeometryType::ShapeFunctionsGradientsType DN_De; Matrix Ncontainer; rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients( Ncontainer, DN_De, (*it)->GetIntegrationMethod() ); // get the values at the integration_points std::vector<Vector> ValuesOnIntPoint(integration_points.size()); (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo()); for(unsigned int point = 0; point < integration_points.size(); ++point) { MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ); double dV = DetJ * 
integration_points[point].Weight(); for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id() - 1; omp_set_lock(&lock_array[row]); for(unsigned int i = 0; i < VariableSize; ++i) b(row, i) += ValuesOnIntPoint[point][i] * Ncontainer(point, prim) * dV; for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id() - 1; M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV; } omp_unset_lock(&lock_array[row]); } } } else { // for inactive elements the contribution to LHS is identity matrix and RHS is zero for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id() - 1; omp_set_lock(&lock_array[row]); // for(unsigned int i = 0; i < VariableSize; ++i) // b(row, i) += 0.0; for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id() - 1; if(col == row) M(row, col) += 1.0; // else // M(row, col) += 0.0; } omp_unset_lock(&lock_array[row]); } } } } for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_destroy_lock(&lock_array[i]); #ifdef ENABLE_PROFILING end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "Assemble the matrix completed: " << end_compute - start_compute << " s" << std::endl; start_compute = end_compute; #endif #ifdef DEBUG_MULTISOLVE KRATOS_WATCH(M) KRATOS_WATCH(b) KRATOS_WATCH(*pSolver) #endif // solve the system // solver must support the multisove method pSolver->Solve(M, g, b); #ifdef DEBUG_MULTISOLVE KRATOS_WATCH(g) #endif // transfer the solution to the nodal variables for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it) { Vector tmp(VariableSize); for(unsigned int i = 0; i < VariableSize; ++i) { tmp(i) = g((it->Id()-1), i); } it->GetSolutionStepValue(rThisVariable) = tmp; } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// 
Assignment operator. BezierClassicalPostUtility& operator=(BezierClassicalPostUtility const& rOther) { return *this; } /// Copy constructor. BezierClassicalPostUtility(BezierClassicalPostUtility const& rOther) { } ///@} }; // Class BezierClassicalPostUtility ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >>(std::istream& rIStream, BezierClassicalPostUtility& rThis) { return rIStream; } /// output stream function inline std::ostream& operator <<(std::ostream& rOStream, const BezierClassicalPostUtility& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block }// namespace Kratos. #undef DEBUG_LEVEL1 #undef DEBUG_LEVEL2 #undef DEBUG_MULTISOLVE #undef DEBUG_GENERATE_MESH #undef ENABLE_PROFILING #endif
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED ) #define KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" #ifdef ISOGEOMETRIC_USE_MPI #include "mpi.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "includes/ublas_interface.h" #include "includes/deprecated_variables.h" #include "includes/legacy_structural_app_vars.h" #include "spaces/ublas_space.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" #include "utilities/auto_collapse_spatial_binning.h" #include "custom_utilities/iga_define.h" #include "custom_geometries/isogeometric_geometry.h" #include "custom_utilities/isogeometric_utility.h" #include "custom_utilities/isogeometric_post_utility.h" #include "isogeometric_application/isogeometric_application.h" //#define DEBUG_LEVEL1 //#define DEBUG_LEVEL2 //#define DEBUG_MULTISOLVE //#define DEBUG_GENERATE_MESH #define ENABLE_PROFILING namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ template<class T> void AddToModelPart(ModelPart& rModelPart, typename T::Pointer pE); template<> void AddToModelPart<Element>(ModelPart& rModelPart, typename Element::Pointer pE) { rModelPart.AddElement(pE); } template<> void AddToModelPart<Condition>(ModelPart& rModelPart, typename Condition::Pointer pC) { rModelPart.AddCondition(pC); } ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** A simple utility to export directly the FEM mesh out from isogeometric Bezier mesh. 
Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements may be generated. One shall carefully use this utility for large problem. Previously, this class is named IsogeometricClassicalPostUtility.
*/
class BezierClassicalPostUtility : public IsogeometricPostUtility
{
public:
    ///@name Type Definitions
    ///@{

    typedef boost::numeric::ublas::vector<double> ValuesContainerType;
    typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
    typedef typename ModelPart::NodesContainerType NodesArrayType;
    typedef typename ModelPart::ElementsContainerType ElementsArrayType;
    typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::PointType NodeType;
    typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
    typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
    typedef typename NodeType::DofsContainerType DofsContainerType;
    typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
    typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
    typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
    typedef std::size_t IndexType;

    /// Pointer definition of BezierClassicalPostUtility
    KRATOS_CLASS_POINTER_DEFINITION(BezierClassicalPostUtility);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    // Keeps a handle to the source (isogeometric) model_part from which the
    // post-processing FEM mesh is generated.
    BezierClassicalPostUtility(ModelPart::Pointer pModelPart)
    : mpModelPart(pModelPart)
    {
    }

    /// Destructor.
    virtual ~BezierClassicalPostUtility()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Generate the post model_part from reference model_part
    /// Deprecated
    // For every active element, a structured grid of nodes is generated in the
    // element's parameter space (NUM_DIVISION_1/2/3 subdivisions per direction),
    // mapped to physical space via GlobalCoordinates, and linear FEM elements
    // (triangle/quad/tet/hex, per postElementType) are created on that grid.
    void GenerateModelPart(ModelPart::Pointer pModelPartPost, PostElementType postElementType)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif
        ElementsArrayType& pElements = mpModelPart->Elements();
#ifdef DEBUG_LEVEL1
        std::cout << "Retrieved pElements" << std::endl;
#endif
        std::string NodeKey = std::string("Node");

        //select the correct post element type
        std::string element_name;
        if(postElementType == _TRIANGLE_)
            element_name = std::string("KinematicLinear2D3N");
        else if(postElementType == _QUADRILATERAL_)
            element_name = std::string("KinematicLinear2D4N");
        else if(postElementType == _TETRAHEDRA_)
            element_name = std::string("KinematicLinear3D4N");
        else if(postElementType == _HEXAHEDRA_)
            element_name = std::string("KinematicLinear3D8N");
        else
            KRATOS_THROW_ERROR(std::logic_error, "This element type is not supported for isogeometric post-processing", __FUNCTION__);

        if(!KratosComponents<Element>::Has(element_name))
        {
            std::stringstream buffer;
            buffer << "Element " << element_name << " is not registered in Kratos.";
            buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

        IndexType NodeCounter = 0;
        IndexType ElementCounter = 0;
        boost::progress_display show_progress( pElements.size() );
        for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            // inactive elements are skipped entirely
            if((*it)->GetValue( IS_INACTIVE ))
            {
//                std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
                continue;
            }

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension();
            // remember the node id offset for this element's local grid
            IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(Dim)
#endif
            //get the properties
            Properties::Pointer pDummyProperties = (*it)->pGetProperties();
#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(*pDummyProperties)
#endif
            // generate list of nodes
            if(Dim == 1)
            {
                // TODO
            }
            else if(Dim == 2)
            {
                IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
                IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
                IndexType i, j;
                CoordinatesArrayType p_ref;
                CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(NumDivision1)
                KRATOS_WATCH(NumDivision2)
                std::cout << "Generating Nodes..." << std::endl;
#endif
                // create and add nodes on a uniform (NumDivision1+1)x(NumDivision2+1)
                // grid in the element's reference (parameter) space
                p_ref[2] = 0.0;
                for(i = 0; i <= NumDivision1; ++i)
                {
                    p_ref[0] = ((double) i) / NumDivision1;
                    for(j = 0; j <= NumDivision2; ++j)
                    {
                        p_ref[1] = ((double) j) / NumDivision2;
                        // map the reference point to physical space
                        p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);
                        NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                        pNewNode->SetId(++NodeCounter);
#ifdef DEBUG_GENERATE_MESH
//                        if(NodeCounter == 585 || NodeCounter == 588 || NodeCounter == 589)
                        if(NodeCounter)
                        {
                            std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                        }
#endif
                        // Giving model part's variables list to the node
                        pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());
                        //set buffer size
                        pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());
                        pModelPartPost->AddNode(pNewNode);
                        // bookkeeping: remember where each post node came from
                        mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                        mNodeToElement(pNewNode->Id()) = (*it)->Id();
                    }
                }
                //for correct mapping to element, the repetitive node is allowed.
//                pModelPartPost->Nodes().Unique();
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Nodes().size())
                std::cout << "Generating Elements..." << std::endl;
#endif
                // create and add element
                // NOTE(review): a large block of superseded, commented-out manual
                // element-creation code was elided here; the connectivities are now
                // built below and passed to IsogeometricPostUtility::CreateEntities.
                std::vector<std::vector<IndexType> > connectivities;
                for(i = 0; i < NumDivision1; ++i)
                {
                    for(j = 0; j < NumDivision2; ++j)
                    {
                        IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                        IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                        IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                        IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
                        if(postElementType == _TRIANGLE_)
                        {
                            // each grid cell is split into two triangles
                            connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
                            connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
                        }
                        else if(postElementType == _QUADRILATERAL_)
                        {
                            connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
                        }
                    }
                }

                ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
                    connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);

                for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
                {
                    pModelPartPost->AddElement(*it2);
                    mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
                }
                pModelPartPost->Elements().Unique();
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
            }
            else if(Dim == 3)
            {
                IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
                IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
                IndexType NumDivision3 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_3) );
                IndexType i, j, k;
                CoordinatesArrayType p_ref;
                CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH((*it)->Id())
                KRATOS_WATCH(NumDivision1)
                KRATOS_WATCH(NumDivision2)
                KRATOS_WATCH(NumDivision3)
                std::cout << "Generating Nodes..." << std::endl;
#endif
                // create and add nodes on the 3D structured reference grid
                for(i = 0; i <= NumDivision1; ++i)
                {
                    p_ref[0] = ((double) i) / NumDivision1;
                    for(j = 0; j <= NumDivision2; ++j)
                    {
                        p_ref[1] = ((double) j) / NumDivision2;
                        for(k = 0; k <= NumDivision3; ++k)
                        {
                            p_ref[2] = ((double) k) / NumDivision3;
                            p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);
                            NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                            pNewNode->SetId(++NodeCounter);
#ifdef DEBUG_GENERATE_MESH
                            if(NodeCounter)
                            {
                                std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                            }
#endif
                            // Giving model part's variables list to the node
                            pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());
                            //set buffer size
                            pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());
                            pModelPartPost->AddNode(pNewNode);
                            mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                            mNodeToElement(pNewNode->Id()) = (*it)->Id();
                        }
                    }
                }
                //for correct mapping to element, the repetitive node is allowed.
//                pModelPartPost->Nodes().Unique();
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Nodes().size())
                std::cout << "Generating Elements..." << std::endl;
#endif
                // create and add element
                // NOTE(review): a large block of superseded, commented-out manual
                // element-creation code was elided here (see CreateEntities below).
                std::vector<std::vector<IndexType> > connectivities;
                for(i = 0; i < NumDivision1; ++i)
                {
                    for(j = 0; j < NumDivision2; ++j)
                    {
                        for(k = 0; k < NumDivision3; ++k)
                        {
                            IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                            IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                            IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                            IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                            IndexType Node5 = Node1 + 1;
                            IndexType Node6 = Node2 + 1;
                            IndexType Node7 = Node3 + 1;
                            IndexType Node8 = Node4 + 1;
                            if(postElementType == _TETRAHEDRA_)
                            {
                                // TODO: check if creating Tetrahedra is correct
                                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
                                connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
                            }
                            else if(postElementType == _HEXAHEDRA_)
                            {
                                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
                            }
                        }
                    }
                }

                ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
                    connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);

                for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
                {
                    pModelPartPost->AddElement(*it2);
                    mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
                }
                pModelPartPost->Elements().Unique();
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
            }
            ++show_progress;
        }
#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "GeneratePostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
        std::cout << "GeneratePostModelPart completed" << std::endl;
#endif
        std::cout << NodeCounter << " nodes and " << ElementCounter << " elements are created" << std::endl;
    }

    /// Generate the post model_part from reference model_part
    /// this is the improved version of GenerateModelPart
    /// which uses template function to generate post Elements for both Element and Condition
    void GenerateModelPart2(ModelPart::Pointer pModelPartPost, const bool& generate_for_condition)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif
        ElementsArrayType& pElements = mpModelPart->Elements();
        ConditionsArrayType& pConditions = mpModelPart->Conditions();
        std::string NodeKey = std::string("Node");

        IndexType NodeCounter = 0;
        IndexType ElementCounter = 0;
        boost::progress_display show_progress( pElements.size() );
        std::vector<std::size_t> dummy_ids;
        for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            // This is wrong, we will not skip the IS_INACTIVE elements
            // TODO: to be deleted
            // NOTE(review): the superseded, commented-out IS_INACTIVE skip was elided here.
            if((*it)->pGetGeometry() == 0)
                KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at element", (*it)->Id())

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
            int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
            IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(Dim)
            KRATOS_WATCH(ReducedDim)
#endif
            //select the correct post element type
            std::string element_name;
            if((Dim == 2) && (ReducedDim == 2))
            {
                element_name = std::string("KinematicLinear2D4N");
            }
            else if((Dim == 3) && (ReducedDim == 2))
            {
                element_name = std::string("KinematicLinear2D4N");
            }
            else if((Dim == 3) && (ReducedDim == 3))
            {
                element_name = std::string("KinematicLinear3D8N");
            }
            else
            {
                std::stringstream ss;
                ss << "Invalid dimension of ";
                ss << typeid(*(*it)).name();
                ss << ", Dim = " << Dim;
                ss << ", ReducedDim = " << ReducedDim;
                KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
            }

            if(!KratosComponents<Element>::Has(element_name))
            {
                std::stringstream buffer;
                buffer << "Element " << element_name << " is not registered in Kratos.";
                buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
                KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
            }

            Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

            // delegate node/element generation for this entity to the template helper
            GenerateForOneEntity<Element, ElementsArrayType, 1>(*pModelPartPost, *(*it), rCloneElement, NodeCounter_old, NodeCounter, ElementCounter, NodeKey, false, dummy_ids, dummy_ids, false);

            ++show_progress;
        }
        KRATOS_WATCH(ElementCounter)
#ifdef DEBUG_LEVEL1
        std::cout << "Done generating for elements" << std::endl;
#endif
        IndexType ConditionCounter = 0;
        if (generate_for_condition)
        {
            boost::progress_display show_progress2( pConditions.size() );
            for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
            {
                // This is wrong, we will not skip the IS_INACTIVE conditions
                // TODO: to be deleted
                // NOTE(review): the superseded, commented-out IS_INACTIVE skip was elided here.
                if((*it)->pGetGeometry() == 0)
                    KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at condition", (*it)->Id())

                int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
                int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
                IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
                KRATOS_WATCH(Dim)
                KRATOS_WATCH(ReducedDim)
#endif
                //select the correct post condition type
                std::string condition_name;
                if(Dim == 3 && ReducedDim == 1)
                    condition_name = std::string("LineForce3D2N");
                else if(Dim == 3 && ReducedDim == 2)
                    condition_name = std::string("FaceForce3D4N");
                else
                {
                    // unsupported geometry: report and skip this condition
                    std::stringstream ss;
                    ss << "Invalid dimension of ";
                    ss << typeid(*(*it)).name();
                    ss << ", Dim = " << Dim;
                    ss << ", ReducedDim = " << ReducedDim;
                    ss << ". Condition " << (*it)->Id() << " will be skipped.";
//                    KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
                    continue;
                }

                if(!KratosComponents<Condition>::Has(condition_name))
                {
                    std::stringstream buffer;
                    buffer << "Condition " << condition_name << " is not registered in Kratos.";
                    buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
                    KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
                }

                Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

                GenerateForOneEntity<Condition, ConditionsArrayType, 2>(*pModelPartPost, *(*it), rCloneCondition, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey, false, dummy_ids, dummy_ids, false);

                ++show_progress2;
            }
            KRATOS_WATCH(ConditionCounter)
        }
#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "GeneratePostModelPart2 completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
        std::cout << "GeneratePostModelPart2 completed" << std::endl;
#endif
        std::cout << NodeCounter << " nodes and " << ElementCounter << " elements";
        if (generate_for_condition)
            std::cout << ", " << ConditionCounter << " conditions";
        std::cout << " are created" << std::endl;
    }

    // Generate the post model_part from reference model_part
    // this is the improved version of GenerateModelPart
    // which uses template function to generate post Elements for both Element and Condition
    // this version used a collapsing utility to collapse nodes automatically
    void GenerateModelPart2AutoCollapse(ModelPart::Pointer pModelPartPost, double dx, double dy, double dz, double tol)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() <<
"::GenerateModelPart" << std::endl;
#endif

// spatial binning utility used to merge coincident post nodes: nodes falling
// within tolerance tol on a virtual grid of spacing (dx, dy, dz) anchored at
// the origin are collapsed to a single node id
AutoCollapseSpatialBinning collapse_util(0.0, 0.0, 0.0, dx, dy, dz, tol);

ElementsArrayType& pElements = mpModelPart->Elements();
ConditionsArrayType& pConditions = mpModelPart->Conditions();

std::string NodeKey = std::string("Node");

// running counters assigning sequential ids to the generated post nodes/elements
IndexType NodeCounter = 0;
IndexType ElementCounter = 0;
boost::progress_display show_progress( pElements.size() );

// map from the sequential (pre-collapse) node number to the collapsed node id
VectorMap<IndexType, IndexType> MapToCollapseNode;

// generate post elements for every active element of the reference model_part
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
    // inactive elements are skipped entirely (the progress bar still advances)
    if((*it)->GetValue( IS_INACTIVE ))
    {
//        std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
        ++show_progress;
        continue;
    }

    int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
    int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
    IndexType NodeCounter_old = NodeCounter;

    #ifdef DEBUG_LEVEL1
    KRATOS_WATCH(Dim)
    KRATOS_WATCH(ReducedDim)
    #endif

    //select the correct post element type
    // only 2D surface and 3D volume geometries are supported here
    std::string element_name;
    if(Dim == 2 && ReducedDim == 2)
        element_name = std::string("KinematicLinear2D4N");
    else if(Dim == 3 && ReducedDim == 3)
        element_name = std::string("KinematicLinear3D8N");
    else
    {
        // unsupported dimension combination -> hard error (unlike the condition
        // loop in GeneratePostModelPart2, which merely skips)
        std::stringstream ss;
        ss << "Invalid dimension of ";
        ss << typeid(*(*it)).name();
        ss << ", Dim = " << Dim;
        ss << ", ReducedDim = " << ReducedDim;
        KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
    }

    if(!KratosComponents<Element>::Has(element_name))
    {
        std::stringstream buffer;
        buffer << "Element " << element_name << " is not registered in Kratos.";
        buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
        KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
    }

    Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

    // create the post nodes/elements for this element; coincident nodes are
    // merged through collapse_util and recorded in MapToCollapseNode
    GenerateForOneEntityAutoCollapse<Element, ElementsArrayType, 1>(collapse_util, *pModelPartPost, *(*it), rCloneElement,
        MapToCollapseNode, NodeCounter_old, NodeCounter, ElementCounter, NodeKey);
    ++show_progress;
}

#ifdef DEBUG_LEVEL1
std::cout << "Done generating for elements" << std::endl;
#endif

IndexType ConditionCounter = 0;
boost::progress_display show_progress2( pConditions.size() );

// generate post conditions for every active condition of the reference model_part
for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
{
    if((*it)->GetValue( IS_INACTIVE ))
    {
//        std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
        ++show_progress2;
        continue;
    }

    int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
    int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
    IndexType NodeCounter_old = NodeCounter;

    #ifdef DEBUG_LEVEL1
    KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
    KRATOS_WATCH(Dim)
    KRATOS_WATCH(ReducedDim)
    #endif

    //select the correct post condition type
    // conditions are only generated for 3D working space: line (ReducedDim 1)
    // and surface (ReducedDim 2) geometries
    std::string condition_name;
    if(Dim == 3 && ReducedDim == 1)
        condition_name = std::string("LineForce3D2N");
    else if(Dim == 3 && ReducedDim == 2)
        condition_name = std::string("FaceForce3D4N");
    else
    {
        std::stringstream ss;
        ss << "Invalid dimension of ";
        ss << typeid(*(*it)).name();
        ss << ", Dim = " << Dim;
        ss << ", ReducedDim = " << ReducedDim;
        KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
    }

    if(!KratosComponents<Condition>::Has(condition_name))
    {
        std::stringstream buffer;
        buffer << "Condition " << condition_name << " is not registered in Kratos.";
        buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
        KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
    }

    Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

    // same MapToCollapseNode is reused so condition nodes collapse onto the
    // node ids already produced by the element loop above
    GenerateForOneEntityAutoCollapse<Condition, ConditionsArrayType, 2>(collapse_util, *pModelPartPost, *(*it), rCloneCondition,
        MapToCollapseNode, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey);
    ++show_progress2;
}
#ifdef ENABLE_PROFILING double end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "Generate PostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl; #else std::cout << "Generate PostModelPart completed" << std::endl; #endif std::cout << NodeCounter << " nodes and " << ElementCounter << " elements" << ", " << ConditionCounter << " conditions are created" << std::endl; } /** * Utility function to generate elements/conditions for element/condition * if TEntityType==Element, type must be 1; if T==Condition, type is 2 */ template<class TEntityType, class TEntityContainerType, std::size_t type> void GenerateForOneEntity(ModelPart& rModelPart, TEntityType& rE, TEntityType const& rSample, IndexType NodeCounter_old, IndexType& NodeCounter, IndexType& EntityCounter, const std::string& NodeKey, const bool& transfer_nodal_var, std::vector<std::size_t>& node_ids, std::vector<std::size_t>& element_ids, const bool& get_indices) { // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension(); int ReducedDim = rE.GetGeometry().Dimension(); //get the properties Properties::Pointer pDummyProperties = rE.pGetProperties(); #ifdef DEBUG_LEVEL1 std::cout << "Generating for " << rE.Info() << std::endl; KRATOS_WATCH(*pDummyProperties) KRATOS_WATCH(EntityCounter) #endif // generate list of nodes if(ReducedDim == 1) { // TODO } else if(ReducedDim == 2) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; Vector shape_values; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); if(type == 1) { mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = rE.Id(); } if (transfer_nodal_var) { shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref); VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList(); for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it) { if (typeid(*it) == typeid(Variable<double>)) { const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it); double value = 0.0; for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n) value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable); pNewNode->GetSolutionStepValue(my_variable) = value; } else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >)) { const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it); array_1d<double, 3> value; noalias(value) = ZeroVector(3); for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n) noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable); pNewNode->GetSolutionStepValue(my_variable) = value; } } } if (get_indices) node_ids.push_back(pNewNode->Id()); } } //for correct mapping to element, the repetitive node is allowed. 
// rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." << std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base())); // int NewEntityId = ++EntityCounter; // // int NewEntityId = rE.Id(); ++EntityCounter; // typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties); // AddToModelPart<TEntityType>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(NewEntityId); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(NewEntityId); // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3}); } } TEntityContainerType pNewEntities = 
IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif if (get_indices) { for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { element_ids.push_back((*it2)->Id()); } } } else if(ReducedDim == 3) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; Vector shape_values; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rE.Id()) KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(++NodeCounter); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); if(type == 1) { mNodeToLocalCoordinates(pNewNode->Id()) = p_ref; mNodeToElement(pNewNode->Id()) = rE.Id(); } if (transfer_nodal_var) { shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref); VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList(); for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it) { if (typeid(*it) == typeid(Variable<double>)) { const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it); double value = 0.0; for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n) value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable); pNewNode->GetSolutionStepValue(my_variable) = value; } else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >)) { const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it); array_1d<double, 3> value; noalias(value) = ZeroVector(3); for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n) noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable); pNewNode->GetSolutionStepValue(my_variable) = value; } } } if (get_indices) 
node_ids.push_back(pNewNode->Id()); } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." << std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node5, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node6, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node8, NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node7, NodeKey).base())); // int NewEntityId = ++EntityCounter; // typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties); // AddToModelPart<TEntityType>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(NewEntityId); 
// else if(type == 2) // mOldToNewConditions[rE.Id()].insert(NewEntityId); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { for(k = 0; k < NumDivision3; ++k) { IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node5 = Node1 + 1; IndexType Node6 = Node2 + 1; IndexType Node7 = Node3 + 1; IndexType Node8 = Node4 + 1; connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7}); } } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif if (get_indices) { for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { element_ids.push_back((*it2)->Id()); } } } } /** * Utility function to generate elements/conditions for element/condition. 
* This uses a collapse utility to automatically merge the coincident nodes * if T==Element, type must be 1; otherwise type=2 */ template<class TEntityType, class TEntityContainerType, std::size_t type> void GenerateForOneEntityAutoCollapse(AutoCollapseSpatialBinning& collapse_util, ModelPart& rModelPart, TEntityType& rE, TEntityType const& rSample, VectorMap<IndexType, IndexType>& rMapToCollapseNode, IndexType NodeCounter_old, IndexType& NodeCounter, IndexType& EntityCounter, const std::string& NodeKey) { // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension(); int ReducedDim = rE.GetGeometry().Dimension(); //get the properties Properties::Pointer pDummyProperties = rE.pGetProperties(); #ifdef DEBUG_LEVEL1 if(type == 1) std::cout << "Generating for element " << rE.Id() << std::endl; else std::cout << "Generating for condition " << rE.Id() << std::endl; KRATOS_WATCH(*pDummyProperties) #endif // generate list of nodes if(ReducedDim == 1) { // TODO } else if(ReducedDim == 2) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; connectivities.push_back(std::vector<IndexType>{ rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3]}); } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, 
pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif } else if(ReducedDim == 3) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rE.Id()) KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node5], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node6], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node8], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node7], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < 
NumDivision2; ++j) { for(k = 0; k < NumDivision3; ++k) { IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; IndexType Node5 = Node1 + 1; IndexType Node6 = Node2 + 1; IndexType Node7 = Node3 + 1; IndexType Node8 = Node4 + 1; connectivities.push_back(std::vector<IndexType>{ rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3], rMapToCollapseNode[Node5], rMapToCollapseNode[Node6], rMapToCollapseNode[Node8], rMapToCollapseNode[Node7]}); } } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif } } // Synchronize the activation between model_parts void SynchronizeActivation(ModelPart::Pointer pModelPartPost) { ElementsArrayType& pElements = mpModelPart->Elements(); for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) { std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()]; for(std::set<IndexType>::iterator it2 
= NewElements.begin(); it2 != NewElements.end(); ++it2)
        {
            // copy the activation flag of the source element to each post element
            // generated from it (mapping recorded in mOldToNewElements)
            pModelPartPost->GetElement(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
        }
    }

    // likewise propagate the activation flag from each reference condition to
    // the post conditions generated from it (via mOldToNewConditions)
    ConditionsArrayType& pConditions = mpModelPart->Conditions();
    for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
    {
        std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
        {
            pModelPartPost->GetCondition(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
        }
    }
}

// transfer the elemental data
// Copies the value of rThisVariable stored on each reference element to all
// post elements that were generated from it.
template<class TVariableType>
void TransferElementalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
    ElementsArrayType& pElements = mpModelPart->Elements();
    for(typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
        {
            pModelPartPost->GetElement(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
        }
    }
}

// transfer the conditional data
// Copies the value of rThisVariable stored on each reference condition to all
// post conditions that were generated from it.
template<class TVariableType>
void TransferConditionalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
    ConditionsArrayType& pConditions = mpModelPart->Conditions();
    for(typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
    {
        std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
        {
            pModelPartPost->GetCondition(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
        }
    }
}

// Synchronize post model_part with the reference model_part
// Evaluates rThisVariable on every post node by looking up the reference
// element and local coordinates stored for that node (mNodeToElement /
// mNodeToLocalCoordinates) and interpolating there via CalculateOnPoint.
// Post nodes without a stored mapping, and nodes whose source element is
// inactive, are left untouched.
template<class TVariableType>
void TransferNodalResults(
    const TVariableType& rThisVariable,
    const ModelPart::Pointer pModelPartPost
)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

    NodesArrayType& pTargetNodes = pModelPartPost->Nodes();
    ElementsArrayType& pElements = mpModelPart->Elements();

    typename TVariableType::Type Results;
    CoordinatesArrayType LocalPos;
    IndexType ElementId;

//    #pragma omp parallel for
    //TODO: check this. This is not parallelized.
    for(NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
    {
        IndexType key = (*it)->Id();
        if(mNodeToElement.find(key) != mNodeToElement.end())
        {
            ElementId = mNodeToElement[key];
            if( ! pElements(ElementId)->GetValue(IS_INACTIVE) ) // skip the inactive elements
            {
                noalias(LocalPos) = mNodeToLocalCoordinates[key];
                Results = CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
                (*it)->GetSolutionStepValue(rThisVariable) = Results;
            }
        }
    }

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#endif
}

// Synchronize post model_part with the reference model_part
// Transfers rThisVariable from the integration points of the reference
// model_part to the post model_part in two steps: first project it onto the
// reference nodes (solver-based TransferVariablesToNodes), then interpolate
// those nodal values onto the post nodes with TransferNodalResults.
template<class TVariableType>
void TransferIntegrationPointResults(
    const TVariableType& rThisVariable,
    const ModelPart::Pointer pModelPartPost,
    LinearSolverType::Pointer pSolver
)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "########################################" << std::endl;
    std::cout << "Transfer integration point results for " << rThisVariable.Name() << " starts" << std::endl;
#endif

    // firstly transfer rThisVariable from integration points of reference model_part to its nodes
    TransferVariablesToNodes(pSolver, mpModelPart, rThisVariable);

    // secondly transfer new nodal variables results to the post model_part
    TransferNodalResults(rThisVariable, pModelPartPost);

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl; std::cout << "########################################" << std::endl; #endif } // Transfer the variable to nodes for model_part template<class TVariableType> void TransferVariablesToNodes( const TVariableType& rThisVariable, ModelPart::Pointer pModelPart, LinearSolverType::Pointer pSolver ) { #ifdef ENABLE_PROFILING double start_compute = OpenMPUtils::GetCurrentTime(); std::cout << "########################################" << std::endl; std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl; #endif TransferVariablesToNodes(pSolver, pModelPart, rThisVariable); #ifdef ENABLE_PROFILING double end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl; std::cout << "########################################" << std::endl; #endif } /** * Utility function to renumber the nodes of the post model_part (for parallel merge) */ void GlobalNodalRenumbering(ModelPart::Pointer pModelPartPost) { #ifdef ISOGEOMETRIC_USE_MPI int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // gather the number of nodes on each process int NumberOfNodes[size]; int MyNumberOfNodes = pModelPartPost->NumberOfNodes(); MPI_Allgather(&MyNumberOfNodes, 1, MPI_INT, NumberOfNodes, 1, MPI_INT, MPI_COMM_WORLD); // std::cout << "NumberOfNodes:"; // for(int i = 0; i < size; ++i) // std::cout << " " << NumberOfNodes[i]; // std::cout << std::endl; // compute the numbering offset int offset = 0; for(int i = 0; i < rank; ++i) offset += NumberOfNodes[i]; // renumber the nodes of the current process for(ModelPart::NodeIterator it = pModelPartPost->NodesBegin(); it != pModelPartPost->NodesEnd(); ++it) { it->SetId(++offset); it->GetSolutionStepValue(PARTITION_INDEX) = rank; } if(rank == 
0) std::cout << "Global renumbering completed" << std::endl; #endif } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "BezierClassicalPostUtility"; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "BezierClassicalPostUtility"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ModelPart::Pointer mpModelPart; // pointer variable to a model_part VectorMap<IndexType, CoordinatesArrayType> mNodeToLocalCoordinates; // vector map to store local coordinates of node on a NURBS entity VectorMap<IndexType, IndexType> mNodeToElement; // vector map to store local coordinates of node on a NURBS entity std::map<IndexType, std::set<IndexType> > mOldToNewElements; // vector map to store id map from old element to new elements std::map<IndexType, std::set<IndexType> > mOldToNewConditions; // vector map to store id map from old condition to new conditions ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * Calculate global coodinates w.r.t initial configuration */ CoordinatesArrayType& GlobalCoordinates( GeometryType& rGeometry, CoordinatesArrayType& rResult, CoordinatesArrayType const& LocalCoordinates ) { noalias( rResult ) = ZeroVector( 3 ); Vector ShapeFunctionsValues; rGeometry.ShapeFunctionsValues(ShapeFunctionsValues, LocalCoordinates); for ( 
IndexType i = 0 ; i < rGeometry.size() ; ++i ) { noalias( rResult ) += ShapeFunctionsValues( i ) * rGeometry.GetPoint( i ).GetInitialPosition(); } return rResult; } /** * Interpolation on element */ double CalculateOnPoint( const Variable<double>& rVariable, double& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates ) { Vector N; pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates); rResult = 0.0; for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i) { double NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable); rResult += N( i ) * NodalValues; } return rResult; } /** * Interpolation on element */ Vector& CalculateOnPoint( const Variable<Vector>& rVariable, Vector& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates ) { Vector N; pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates); for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i) { Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable); if(i == 0) { rResult = N( i ) * NodalValues; } else { noalias(rResult) += N( i ) * NodalValues; } } return rResult; } /** * Interpolation on element */ array_1d<double, 3>& CalculateOnPoint( const Variable<array_1d<double, 3> >& rVariable, array_1d<double, 3>& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates ) { Vector N; pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates); rResult[0] = 0.0; rResult[1] = 0.0; rResult[2] = 0.0; for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i) { array_1d<double, 3> NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable); rResult += N( i ) * NodalValues; } return rResult; } /** * Transfer variable at integration points to nodes * * @param pSolver the solver used for solving the local system matrix * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes * @param rThisVariable 
the variable need to transfer the respected values */ void TransferVariablesToNodes( LinearSolverType::Pointer& pSolver, ModelPart::Pointer& pModelPart, const Variable<double>& rThisVariable ) { ElementsArrayType& ElementsArray= pModelPart->Elements(); //Initialize system of equations int NumberOfNodes = pModelPart->NumberOfNodes(); SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes); noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes); SerialSparseSpaceType::VectorType g(NumberOfNodes); noalias(g)= ZeroVector(NumberOfNodes); SerialSparseSpaceType::VectorType b(NumberOfNodes); noalias(b)= ZeroVector(NumberOfNodes); // create the structure for M a priori ConstructL2MatrixStructure<Element>(M, ElementsArray); // Transfer of GaussianVariables to Nodal Variables via L_2-Minimization // see Jiao + Heath "Common-refinement-based data tranfer ..." // International Journal for numerical methods in engineering 61 (2004) 2402--2427 // for general description of L_2-Minimization // set up the system of equations //create a partition of the element array int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition); KRATOS_WATCH( number_of_threads ) std::cout << "element_partition:"; for (std::size_t i = 0; i < element_partition.size(); ++i) std::cout << " " << element_partition[i]; std::cout << std::endl; //create the array of lock std::vector< omp_lock_t > lock_array(NumberOfNodes); for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_init_lock(&lock_array[i]); #pragma omp parallel for for(int k = 0; k < number_of_threads; ++k) { Matrix InvJ(3, 3); double DetJ; unsigned int row, col; typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != 
it_end; ++it ) { if(!(*it)->GetValue(IS_INACTIVE)) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry()); J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod()); GeometryType::ShapeFunctionsGradientsType DN_De; Matrix Ncontainer; rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients( Ncontainer, DN_De, (*it)->GetIntegrationMethod() ); // get the values at the integration_points std::vector<double> ValuesOnIntPoint(integration_points.size()); (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo()); for(unsigned int point = 0; point< integration_points.size(); ++point) { MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ); double dV = DetJ * integration_points[point].Weight(); for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id()-1; omp_set_lock(&lock_array[row]); b(row) += (ValuesOnIntPoint[point]) * Ncontainer(point, prim) * dV; for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id()-1; M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV; } omp_unset_lock(&lock_array[row]); } } } else { // for inactive elements the contribution to LHS is identity matrix and RHS is zero for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id()-1; omp_set_lock(&lock_array[row]); // b(row) += 0.0; for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec) { col = 
(*it)->GetGeometry()[sec].Id()-1; if(col == row) M(row, col) += 1.0; // else // M(row, col) += 0.0; } omp_unset_lock(&lock_array[row]); } } } } for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_destroy_lock(&lock_array[i]); // solver the system pSolver->Solve(M, g, b); // transfer the solution to the nodal variables for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it) { it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1)); } } /** * Transfer of rThisVariable defined on integration points to corresponding * nodal values. The transformation is done in a form that ensures a minimization * of L_2-norm error (/sum{rThisVariable- f(x)) whereas * f(x)= /sum{shape_func_i*rThisVariable_i} * @param model_part model_part on which the transfer should be done * @param rThisVariable Matrix-Variable which should be transferred * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable) * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int. * Journal for numer. meth. in eng. 61 (2004) 2402--2427 * WARNING: this may cause segmentation faults as the respective variables * will be created on nodal level while they are originally intended to be * stored on integration points! 
* * @param pSolver the solver used for solving the local system matrix * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes * @param rThisVariable the variable need to transfer the respected values */ void TransferVariablesToNodes( LinearSolverType::Pointer& pSolver, ModelPart::Pointer& pModelPart, const Variable<Vector>& rThisVariable ) { ElementsArrayType& ElementsArray = pModelPart->Elements(); const unsigned int& Dim = (*(ElementsArray.ptr_begin()))->GetGeometry().WorkingSpaceDimension(); unsigned int VariableSize; if(rThisVariable.Name() == std::string("STRESSES") || rThisVariable.Name() == std::string("PLASTIC_STRAIN_VECTOR") || rThisVariable.Name() == std::string("PRESTRESS") || rThisVariable.Name() == std::string("STRAIN") // TODO: extend for more variables ) { VariableSize = Dim * (Dim + 1) / 2; } else KRATOS_THROW_ERROR(std::logic_error, rThisVariable.Name(), " is not a supported variable for TransferVariablesToNodes routine.") #ifdef ENABLE_PROFILING //profiling variables double start_compute, end_compute; start_compute = OpenMPUtils::GetCurrentTime(); #endif //Initialize system of equations unsigned int NumberOfNodes = pModelPart->NumberOfNodes(); SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes); noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes); // create the structure for M a priori ConstructL2MatrixStructure<Element>(M, ElementsArray); #ifdef ENABLE_PROFILING end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "ConstructMatrixStructure completed: " << end_compute - start_compute << " s" << std::endl; start_compute = end_compute; #endif SerialDenseSpaceType::MatrixType g(NumberOfNodes, VariableSize); noalias(g)= ZeroMatrix(NumberOfNodes, VariableSize); SerialDenseSpaceType::MatrixType b(NumberOfNodes, VariableSize); noalias(b)= ZeroMatrix(NumberOfNodes, VariableSize); std::vector< omp_lock_t > lock_array(NumberOfNodes); for(unsigned int i = 0; i < 
NumberOfNodes; ++i) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition); KRATOS_WATCH( number_of_threads ) std::cout << "element_partition:"; for (std::size_t i = 0; i < element_partition.size(); ++i) std::cout << " " << element_partition[i]; std::cout << std::endl; #pragma omp parallel for for(int k = 0; k < number_of_threads; ++k) { Matrix InvJ(Dim, Dim); double DetJ; unsigned int row, col; typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it ) { if(!(*it)->GetValue(IS_INACTIVE)) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry()); J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod()); GeometryType::ShapeFunctionsGradientsType DN_De; Matrix Ncontainer; rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients( Ncontainer, DN_De, (*it)->GetIntegrationMethod() ); // get the values at the integration_points std::vector<Vector> ValuesOnIntPoint(integration_points.size()); (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo()); for(unsigned int point = 0; point < integration_points.size(); ++point) { MathUtils<double>::InvertMatrix(J[point], InvJ, 
DetJ); double dV = DetJ * integration_points[point].Weight(); for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id() - 1; omp_set_lock(&lock_array[row]); for(unsigned int i = 0; i < VariableSize; ++i) b(row, i) += ValuesOnIntPoint[point][i] * Ncontainer(point, prim) * dV; for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id() - 1; M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV; } omp_unset_lock(&lock_array[row]); } } } else { // for inactive elements the contribution to LHS is identity matrix and RHS is zero for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim) { row = (*it)->GetGeometry()[prim].Id() - 1; omp_set_lock(&lock_array[row]); // for(unsigned int i = 0; i < VariableSize; ++i) // b(row, i) += 0.0; for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec) { col = (*it)->GetGeometry()[sec].Id() - 1; if(col == row) M(row, col) += 1.0; // else // M(row, col) += 0.0; } omp_unset_lock(&lock_array[row]); } } } } for(unsigned int i = 0; i < NumberOfNodes; ++i) omp_destroy_lock(&lock_array[i]); #ifdef ENABLE_PROFILING end_compute = OpenMPUtils::GetCurrentTime(); std::cout << "Assemble the matrix completed: " << end_compute - start_compute << " s" << std::endl; start_compute = end_compute; #endif #ifdef DEBUG_MULTISOLVE KRATOS_WATCH(M) KRATOS_WATCH(b) KRATOS_WATCH(*pSolver) #endif // solve the system // solver must support the multisove method pSolver->Solve(M, g, b); #ifdef DEBUG_MULTISOLVE KRATOS_WATCH(g) #endif // transfer the solution to the nodal variables for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it) { Vector tmp(VariableSize); for(unsigned int i = 0; i < VariableSize; ++i) { tmp(i) = g((it->Id()-1), i); } it->GetSolutionStepValue(rThisVariable) = tmp; } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible 
methods ///@{ /// Assignment operator. BezierClassicalPostUtility& operator=(BezierClassicalPostUtility const& rOther) { return *this; } /// Copy constructor. BezierClassicalPostUtility(BezierClassicalPostUtility const& rOther) { } ///@} }; // Class BezierClassicalPostUtility ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >>(std::istream& rIStream, BezierClassicalPostUtility& rThis) { return rIStream; } /// output stream function inline std::ostream& operator <<(std::ostream& rOStream, const BezierClassicalPostUtility& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block }// namespace Kratos. #undef DEBUG_LEVEL1 #undef DEBUG_LEVEL2 #undef DEBUG_MULTISOLVE #undef DEBUG_GENERATE_MESH #undef ENABLE_PROFILING #endif
LAGraph_dense_relabel.c
//------------------------------------------------------------------------------ // LAGraph_dense_relabel: dense relabeling of ids to matrix indices //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_dense_relabel: relabel sparse IDs to dense row/column indices // Contributed by Marton Elekes and Gabor Szarnyas, // Budapest University of Technology and Economics // (with accented characters: M\'{a}rton Elekes and G\'{a}bor Sz\'{a}rnyas. // Converts array of sparse IDs (ids) to row/column indices between 0...(nids-1). // The order of IDs is kept, therefore ids can be used for index -> ID conversion: ids[index]=id. // // Gives back two binary matrices for conversion between ID- and index-based vertices. 
// id2index vector can be used to look up for indices of chosen IDs. // id_dimension gives back the height of Id2index matrix and id2index vector. (Same as width of Index2id_handle matrix.) // id_dimension is the size that can store the largest ID in the array. // Currently it is the largest valid dimension in SuiteSparse:GraphBLAS (GxB_INDEX_MAX) // // Find usage example in /Test/DenseRelabel/dense_relabel_test.c #include "LAGraph_internal.h" #include <string.h> #define LAGRAPH_FREE_ALL \ { \ GrB_free (Id2index_handle) ; \ GrB_free (Index2id_handle) ; \ GrB_free (id2index_handle) ; \ LAGRAPH_FREE (indices) ; \ LAGRAPH_FREE (true_values) ; \ } //------------------------------------------------------------------------------ GrB_Info LAGraph_dense_relabel // relabel sparse IDs to dense row/column indices ( GrB_Matrix *Id2index_handle, // output matrix: A(id, index)=1 (unfilled if NULL) GrB_Matrix *Index2id_handle, // output matrix: B(index, id)=1 (unfilled if NULL) GrB_Vector *id2index_handle, // output vector: v(id)=index (unfilled if NULL) const GrB_Index *ids, // array of unique identifiers (under GxB_INDEX_MAX) GrB_Index nids, // number of identifiers GrB_Index *id_dimension // number of rows in Id2index matrix, id2index vector (unfilled if NULL) ) { GrB_Index *indices = NULL; bool *true_values = NULL; // from LAGraph_1_to_n.c int nthreads = LAGraph_get_nthreads(); nthreads = LAGRAPH_MIN ((int64_t) (nids / 4096), nthreads); nthreads = LAGRAPH_MAX (nthreads, 1); //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- if (!Id2index_handle && !Index2id_handle && !id2index_handle) { LAGRAPH_ERROR ("All output mapping arguments are NULL", GrB_NULL_POINTER); } if (!ids) { LAGRAPH_ERROR ("ids is NULL", GrB_NULL_POINTER); } // the largest valid dimension in SuiteSparse:GraphBLAS GrB_Index id_max_dimension = GxB_INDEX_MAX; if (id_dimension) *id_dimension = 
id_max_dimension; // set indices 0..(nids-1) indices = LAGraph_malloc(nids, sizeof(*indices)); if (!indices) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } #pragma omp parallel for num_threads(nthreads) schedule(static) for (size_t i = 0; i < nids; ++i) { indices[i] = i; } // build vector id2index(original_id) = index if (id2index_handle) { LAGr_Vector_new(id2index_handle, GrB_UINT64, id_max_dimension); LAGr_Vector_build(*id2index_handle, ids, indices, nids, GrB_SECOND_UINT64); } if (Id2index_handle || Index2id_handle) { // initialize true values of the matrix true_values = LAGraph_malloc(nids, sizeof(*true_values)); if (!true_values) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } memset(true_values, true, nids * sizeof(*true_values)); // build matrix Index2id(index, original_id) = 1 if (Index2id_handle) { LAGr_Matrix_new(Index2id_handle, GrB_BOOL, nids, id_max_dimension); LAGr_Matrix_build(*Index2id_handle, indices, ids, true_values, nids, GrB_SECOND_UINT64); } // build matrix Id2index(original_id, index) = 1 if (Id2index_handle) { LAGr_Matrix_new(Id2index_handle, GrB_BOOL, id_max_dimension, nids); LAGr_Matrix_build(*Id2index_handle, ids, indices, true_values, nids, GrB_SECOND_UINT64); } } LAGRAPH_FREE(indices); LAGRAPH_FREE(true_values); return GrB_SUCCESS; }
//------------------------------------------------------------------------------ // LAGraph_dense_relabel: dense relabeling of ids to matrix indices //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_dense_relabel: relabel sparse IDs to dense row/column indices // Contributed by Marton Elekes and Gabor Szarnyas, // Budapest University of Technology and Economics // (with accented characters: M\'{a}rton Elekes and G\'{a}bor Sz\'{a}rnyas. // Converts array of sparse IDs (ids) to row/column indices between 0...(nids-1). // The order of IDs is kept, therefore ids can be used for index -> ID conversion: ids[index]=id. // // Gives back two binary matrices for conversion between ID- and index-based vertices. 
// id2index vector can be used to look up for indices of chosen IDs. // id_dimension gives back the height of Id2index matrix and id2index vector. (Same as width of Index2id_handle matrix.) // id_dimension is the size that can store the largest ID in the array. // Currently it is the largest valid dimension in SuiteSparse:GraphBLAS (GxB_INDEX_MAX) // // Find usage example in /Test/DenseRelabel/dense_relabel_test.c #include "LAGraph_internal.h" #include <string.h> #define LAGRAPH_FREE_ALL \ { \ GrB_free (Id2index_handle) ; \ GrB_free (Index2id_handle) ; \ GrB_free (id2index_handle) ; \ LAGRAPH_FREE (indices) ; \ LAGRAPH_FREE (true_values) ; \ } //------------------------------------------------------------------------------ GrB_Info LAGraph_dense_relabel // relabel sparse IDs to dense row/column indices ( GrB_Matrix *Id2index_handle, // output matrix: A(id, index)=1 (unfilled if NULL) GrB_Matrix *Index2id_handle, // output matrix: B(index, id)=1 (unfilled if NULL) GrB_Vector *id2index_handle, // output vector: v(id)=index (unfilled if NULL) const GrB_Index *ids, // array of unique identifiers (under GxB_INDEX_MAX) GrB_Index nids, // number of identifiers GrB_Index *id_dimension // number of rows in Id2index matrix, id2index vector (unfilled if NULL) ) { GrB_Index *indices = NULL; bool *true_values = NULL; // from LAGraph_1_to_n.c int nthreads = LAGraph_get_nthreads(); nthreads = LAGRAPH_MIN ((int64_t) (nids / 4096), nthreads); nthreads = LAGRAPH_MAX (nthreads, 1); //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- if (!Id2index_handle && !Index2id_handle && !id2index_handle) { LAGRAPH_ERROR ("All output mapping arguments are NULL", GrB_NULL_POINTER); } if (!ids) { LAGRAPH_ERROR ("ids is NULL", GrB_NULL_POINTER); } // the largest valid dimension in SuiteSparse:GraphBLAS GrB_Index id_max_dimension = GxB_INDEX_MAX; if (id_dimension) *id_dimension = 
id_max_dimension; // set indices 0..(nids-1) indices = LAGraph_malloc(nids, sizeof(*indices)); if (!indices) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } for (size_t i = 0; i < nids; ++i) { indices[i] = i; } // build vector id2index(original_id) = index if (id2index_handle) { LAGr_Vector_new(id2index_handle, GrB_UINT64, id_max_dimension); LAGr_Vector_build(*id2index_handle, ids, indices, nids, GrB_SECOND_UINT64); } if (Id2index_handle || Index2id_handle) { // initialize true values of the matrix true_values = LAGraph_malloc(nids, sizeof(*true_values)); if (!true_values) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } memset(true_values, true, nids * sizeof(*true_values)); // build matrix Index2id(index, original_id) = 1 if (Index2id_handle) { LAGr_Matrix_new(Index2id_handle, GrB_BOOL, nids, id_max_dimension); LAGr_Matrix_build(*Index2id_handle, indices, ids, true_values, nids, GrB_SECOND_UINT64); } // build matrix Id2index(original_id, index) = 1 if (Id2index_handle) { LAGr_Matrix_new(Id2index_handle, GrB_BOOL, id_max_dimension, nids); LAGr_Matrix_build(*Id2index_handle, ids, indices, true_values, nids, GrB_SECOND_UINT64); } } LAGRAPH_FREE(indices); LAGRAPH_FREE(true_values); return GrB_SUCCESS; }
//------------------------------------------------------------------------------ // LAGraph_dense_relabel: dense relabeling of ids to matrix indices //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_dense_relabel: relabel sparse IDs to dense row/column indices // Contributed by Marton Elekes and Gabor Szarnyas, // Budapest University of Technology and Economics // (with accented characters: M\'{a}rton Elekes and G\'{a}bor Sz\'{a}rnyas. // Converts array of sparse IDs (ids) to row/column indices between 0...(nids-1). // The order of IDs is kept, therefore ids can be used for index -> ID conversion: ids[index]=id. // // Gives back two binary matrices for conversion between ID- and index-based vertices. 
// id2index vector can be used to look up for indices of chosen IDs. // id_dimension gives back the height of Id2index matrix and id2index vector. (Same as width of Index2id_handle matrix.) // id_dimension is the size that can store the largest ID in the array. // Currently it is the largest valid dimension in SuiteSparse:GraphBLAS (GxB_INDEX_MAX) // // Find usage example in /Test/DenseRelabel/dense_relabel_test.c #include "LAGraph_internal.h" #include <string.h> #define LAGRAPH_FREE_ALL \ { \ GrB_free (Id2index_handle) ; \ GrB_free (Index2id_handle) ; \ GrB_free (id2index_handle) ; \ LAGRAPH_FREE (indices) ; \ LAGRAPH_FREE (true_values) ; \ } //------------------------------------------------------------------------------ GrB_Info LAGraph_dense_relabel // relabel sparse IDs to dense row/column indices ( GrB_Matrix *Id2index_handle, // output matrix: A(id, index)=1 (unfilled if NULL) GrB_Matrix *Index2id_handle, // output matrix: B(index, id)=1 (unfilled if NULL) GrB_Vector *id2index_handle, // output vector: v(id)=index (unfilled if NULL) const GrB_Index *ids, // array of unique identifiers (under GxB_INDEX_MAX) GrB_Index nids, // number of identifiers GrB_Index *id_dimension // number of rows in Id2index matrix, id2index vector (unfilled if NULL) ) { GrB_Index *indices = NULL; bool *true_values = NULL; // from LAGraph_1_to_n.c int nthreads = LAGraph_get_nthreads(); nthreads = LAGRAPH_MIN ((int64_t) (nids / 4096), nthreads); nthreads = LAGRAPH_MAX (nthreads, 1); //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- if (!Id2index_handle && !Index2id_handle && !id2index_handle) { LAGRAPH_ERROR ("All output mapping arguments are NULL", GrB_NULL_POINTER); } if (!ids) { LAGRAPH_ERROR ("ids is NULL", GrB_NULL_POINTER); } // the largest valid dimension in SuiteSparse:GraphBLAS GrB_Index id_max_dimension = GxB_INDEX_MAX; if (id_dimension) *id_dimension = 
id_max_dimension; // set indices 0..(nids-1) indices = LAGraph_malloc(nids, sizeof(*indices)); if (!indices) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } #pragma omp parallel for num_threads(nthreads) schedule(static) for (size_t i = 0; i < nids; ++i) { indices[i] = i; } // build vector id2index(original_id) = index if (id2index_handle) { LAGr_Vector_new(id2index_handle, GrB_UINT64, id_max_dimension); LAGr_Vector_build(*id2index_handle, ids, indices, nids, GrB_SECOND_UINT64); } if (Id2index_handle || Index2id_handle) { // initialize true values of the matrix true_values = LAGraph_malloc(nids, sizeof(*true_values)); if (!true_values) { LAGRAPH_ERROR ("Out of Memory", GrB_OUT_OF_MEMORY); } memset(true_values, true, nids * sizeof(*true_values)); // build matrix Index2id(index, original_id) = 1 if (Index2id_handle) { LAGr_Matrix_new(Index2id_handle, GrB_BOOL, nids, id_max_dimension); LAGr_Matrix_build(*Index2id_handle, indices, ids, true_values, nids, GrB_SECOND_UINT64); } // build matrix Id2index(original_id, index) = 1 if (Id2index_handle) { LAGr_Matrix_new(Id2index_handle, GrB_BOOL, id_max_dimension, nids); LAGr_Matrix_build(*Id2index_handle, ids, indices, true_values, nids, GrB_SECOND_UINT64); } } LAGRAPH_FREE(indices); LAGRAPH_FREE(true_values); return GrB_SUCCESS; }
Graph.h
#ifndef BasicGraph #define BasicGraph /* * Graph.h: * manage nodes in a neural network model * * Created on: Apr 21, 2017 * Author: mszhang */ #include "Eigen/Dense" #include "Node.h" #include "MyLib.h" using namespace Eigen; // one Node means a vector // the col should be 1, because we aimed for NLP only class Graph { protected: vector<PExecute> execs; //backward vector<PNode> nodes; //forward vector<PNode> free_nodes; vector<PNode> finish_nodes; vector<PNode> all_nodes; public: bool train; dtype drop_factor; public: Graph() { execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); drop_factor = 1.0; } virtual ~Graph() { int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); } inline void setDropFactor(dtype cur_drop_factor) { drop_factor = cur_drop_factor; if (drop_factor <= 0) drop_factor = 0; if (drop_factor >= 1.0) drop_factor = 1.0; } public: inline void clearValue(const bool& bTrain = false) { int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); count = nodes.size(); for (int idx = 0; idx < count; idx++) { nodes[idx]->clearValue(); } nodes.clear(); free_nodes.clear(); finish_nodes.clear(); all_nodes.clear(); train = bTrain; } inline void backward() { int count = execs.size(); for (int idx = count - 1; idx >= 0; idx--) { execs[idx]->backward(); } } inline void addNode(PNode x) { nodes.push_back(x); if (x->degree == 0) { free_nodes.push_back(x); } all_nodes.push_back(x); } //real executation inline void compute() { int free_count = free_nodes.size(); while (free_count > 0) { vector<PExecute> cur_execs; int cur_execs_size = 0; for (int idx = 0; idx < free_count; idx++) { bool find = false; for (int idy = 0; idy < cur_execs_size; idy++) { if (cur_execs[idy]->addNode(free_nodes[idx])) { find = true; break; } } if (!find) { PExecute new_exec = free_nodes[idx]->generate(train, drop_factor); 
cur_execs.push_back(new_exec); cur_execs_size++; } } //execute //#pragma omp parallel for for (int idy = 0; idy < cur_execs_size; idy++) { cur_execs[idy]->forward(); } for (int idy = 0; idy < cur_execs_size; idy++) { execs.push_back(cur_execs[idy]); } //finished nodes vector<PNode> new_free_nodes; for (int idx = 0; idx < free_count; idx++) { finish_nodes.push_back(free_nodes[idx]); int parent_count = free_nodes[idx]->parents.size(); for (int idy = 0; idy < parent_count; idy++) { free_nodes[idx]->parents[idy]->degree--; if (free_nodes[idx]->parents[idy]->degree == 0) { new_free_nodes.push_back(free_nodes[idx]->parents[idy]); } } } // update free nodes free_nodes.clear(); free_count = new_free_nodes.size(); for (int idx = 0; idx < free_count; idx++) { free_nodes.push_back(new_free_nodes[idx]); } } if (finish_nodes.size() != all_nodes.size()) { std::cout << "error: several nodes are not executed, finished: " << finish_nodes.size() << ", all: " << all_nodes.size() << std::endl; int total_node_num = all_nodes.size(); int unprocessed = 0; for (int idx = 0; idx < total_node_num; idx++) { PNode curNode = all_nodes[idx]; if (curNode->degree >= 0) { curNode->typeEqual(all_nodes[0]); unprocessed++; } } std::cout << "unprocessed: " << unprocessed << std::endl; } } }; // one very useful function to collect pointers of derived nodes template<typename DerivedNode> inline vector<PNode> getPNodes(vector<DerivedNode>& inputs, int size) { int usedSize = inputs.size(); if (size >= 0 && size < usedSize) usedSize = size; vector<PNode> pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template<typename DerivedNode> inline vector<PNode> getPNodes(DerivedNode inputs[], int size) { //int usedSize = inputs.; //if (size >= 0 && size < usedSize) usedSize = size; int usedSize = size; vector<PNode> pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template<typename DerivedNode> inline 
vector<PNode> getPNodes(vector<DerivedNode>& inputs, int start, int length) { int end, tmp_end = start + length; if (tmp_end > inputs.size()) end = inputs.size(); else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector<PNode> pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template<typename DerivedNode> inline vector<PNode> getPNodes(DerivedNode inputs[], int size, int start, int length) { int end, tmp_end = start + length; if (tmp_end > size) end = size; else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector<PNode> pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } #endif
#ifndef BasicGraph #define BasicGraph /* * Graph.h: manage nodes in a neural network model * * Created on: Apr 21, 2017 Author: mszhang */ #include "Eigen/Dense" #include "Node.h" #include "MyLib.h" using namespace Eigen; //one Node means a vector // the col should be 1, because we aimed for NLP only class Graph { protected: vector < PExecute > execs; //backward vector < PNode > nodes; //forward vector < PNode > free_nodes; vector < PNode > finish_nodes; vector < PNode > all_nodes; public: bool train; dtype drop_factor; public: Graph() { execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); drop_factor = 1.0; } virtual ~ Graph() { int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); } inline void setDropFactor(dtype cur_drop_factor) { drop_factor = cur_drop_factor; if (drop_factor <= 0) drop_factor = 0; if (drop_factor >= 1.0) drop_factor = 1.0; } public: inline void clearValue(const bool & bTrain = false){ int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); count = nodes.size(); for (int idx = 0; idx < count; idx++) { nodes[idx]->clearValue(); } nodes.clear(); free_nodes.clear(); finish_nodes.clear(); all_nodes.clear(); train = bTrain; } inline void backward() { int count = execs.size(); for (int idx = count - 1; idx >= 0; idx--) { execs[idx]->backward(); } } inline void addNode(PNode x) { nodes.push_back(x); if (x->degree == 0) { free_nodes.push_back(x); } all_nodes.push_back(x); } //real executation inline void compute() { int free_count = free_nodes.size(); while (free_count > 0) { vector < PExecute > cur_execs; int cur_execs_size = 0; for (int idx = 0; idx < free_count; idx++) { bool find = false; for (int idy = 0; idy < cur_execs_size; idy++) { if (cur_execs[idy]->addNode(free_nodes[idx])) { find = true; break; } } if (!find) { PExecute new_exec = free_nodes[idx]->generate(train, drop_factor); 
cur_execs.push_back(new_exec); cur_execs_size++; } } //execute // for (int idy = 0; idy < cur_execs_size; idy++) { cur_execs[idy]->forward(); } for (int idy = 0; idy < cur_execs_size; idy++) { execs.push_back(cur_execs[idy]); } //finished nodes vector < PNode > new_free_nodes; for (int idx = 0; idx < free_count; idx++) { finish_nodes.push_back(free_nodes[idx]); int parent_count = free_nodes[idx]->parents.size(); for (int idy = 0; idy < parent_count; idy++) { free_nodes[idx]->parents[idy]->degree--; if (free_nodes[idx]->parents[idy]->degree == 0) { new_free_nodes.push_back(free_nodes[idx]->parents[idy]); } } } //update free nodes free_nodes.clear(); free_count = new_free_nodes.size(); for (int idx = 0; idx < free_count; idx++) { free_nodes.push_back(new_free_nodes[idx]); } } if (finish_nodes.size() != all_nodes.size()) { std: : cout << "error: several nodes are not executed, finished: " << finish_nodes.size() << ", all: " << all_nodes.size() << std: :endl; int total_node_num = all_nodes.size(); int unprocessed = 0; for (int idx = 0; idx < total_node_num; idx++) { PNode curNode = all_nodes[idx]; if (curNode->degree >= 0) { curNode->typeEqual(all_nodes[0]); unprocessed++; } } std: : cout << "unprocessed: " << unprocessed << std: :endl; } } }; //one very useful function to collect pointers of derived nodes template < typename DerivedNode > inline vector < PNode > getPNodes(vector < DerivedNode > &inputs, int size) { int usedSize = inputs.size(); if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < typename DerivedNode > inline vector < PNode > getPNodes(DerivedNode inputs[], int size) { //int usedSize = inputs.; //if (size >= 0 && size < usedSize) usedSize = size; int usedSize = size; vector < PNode > pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < typename DerivedNode > 
inline vector < PNode > getPNodes(vector < DerivedNode > &inputs, int start, int length) { int end, tmp_end = start + length; if (tmp_end > inputs.size()) end = inputs.size(); else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < typename DerivedNode > inline vector < PNode > getPNodes(DerivedNode inputs[], int size, int start, int length) { int end, tmp_end = start + length; if (tmp_end > size) end = size; else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } #endif
#ifndef BasicGraph #define BasicGraph /* * Graph.h: manage nodes in a neural network model * * Created on: Apr 21, 2017 Author: mszhang */ #include "Eigen/Dense" #include "Node.h" #include "MyLib.h" using namespace Eigen; //one Node means a vector // the col should be 1, because we aimed for NLP only class Graph { protected: vector < PExecute > execs; //backward vector < PNode > nodes; //forward vector < PNode > free_nodes; vector < PNode > finish_nodes; vector < PNode > all_nodes; public: bool train; dtype drop_factor; public: Graph() { execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); drop_factor = 1.0; } virtual ~ Graph() { int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); execs.clear(); nodes.clear(); free_nodes.clear(); } inline void setDropFactor(dtype cur_drop_factor) { drop_factor = cur_drop_factor; if (drop_factor <= 0) drop_factor = 0; if (drop_factor >= 1.0) drop_factor = 1.0; } public: inline void clearValue(const bool & bTrain = false){ int count = execs.size(); for (int idx = 0; idx < count; idx++) { delete execs[idx]; } execs.clear(); count = nodes.size(); for (int idx = 0; idx < count; idx++) { nodes[idx]->clearValue(); } nodes.clear(); free_nodes.clear(); finish_nodes.clear(); all_nodes.clear(); train = bTrain; } inline void backward() { int count = execs.size(); for (int idx = count - 1; idx >= 0; idx--) { execs[idx]->backward(); } } inline void addNode(PNode x) { nodes.push_back(x); if (x->degree == 0) { free_nodes.push_back(x); } all_nodes.push_back(x); } //real executation inline void compute() { int free_count = free_nodes.size(); while (free_count > 0) { vector < PExecute > cur_execs; int cur_execs_size = 0; for (int idx = 0; idx < free_count; idx++) { bool find = false; for (int idy = 0; idy < cur_execs_size; idy++) { if (cur_execs[idy]->addNode(free_nodes[idx])) { find = true; break; } } if (!find) { PExecute new_exec = free_nodes[idx]->generate(train, drop_factor); 
cur_execs.push_back(new_exec); cur_execs_size++; } } //execute // #pragma omp parallel for for (int idy = 0; idy < cur_execs_size; idy++) { cur_execs[idy]->forward(); } for (int idy = 0; idy < cur_execs_size; idy++) { execs.push_back(cur_execs[idy]); } //finished nodes vector < PNode > new_free_nodes; for (int idx = 0; idx < free_count; idx++) { finish_nodes.push_back(free_nodes[idx]); int parent_count = free_nodes[idx]->parents.size(); for (int idy = 0; idy < parent_count; idy++) { free_nodes[idx]->parents[idy]->degree--; if (free_nodes[idx]->parents[idy]->degree == 0) { new_free_nodes.push_back(free_nodes[idx]->parents[idy]); } } } //update free nodes free_nodes.clear(); free_count = new_free_nodes.size(); for (int idx = 0; idx < free_count; idx++) { free_nodes.push_back(new_free_nodes[idx]); } } if (finish_nodes.size() != all_nodes.size()) { std: : cout << "error: several nodes are not executed, finished: " << finish_nodes.size() << ", all: " << all_nodes.size() << std: :endl; int total_node_num = all_nodes.size(); int unprocessed = 0; for (int idx = 0; idx < total_node_num; idx++) { PNode curNode = all_nodes[idx]; if (curNode->degree >= 0) { curNode->typeEqual(all_nodes[0]); unprocessed++; } } std: : cout << "unprocessed: " << unprocessed << std: :endl; } } }; //one very useful function to collect pointers of derived nodes template < typename DerivedNode > inline vector < PNode > getPNodes(vector < DerivedNode > &inputs, int size) { int usedSize = inputs.size(); if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < typename DerivedNode > inline vector < PNode > getPNodes(DerivedNode inputs[], int size) { //int usedSize = inputs.; //if (size >= 0 && size < usedSize) usedSize = size; int usedSize = size; vector < PNode > pnodes; for (int idx = 0; idx < usedSize; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < 
typename DerivedNode > inline vector < PNode > getPNodes(vector < DerivedNode > &inputs, int start, int length) { int end, tmp_end = start + length; if (tmp_end > inputs.size()) end = inputs.size(); else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } template < typename DerivedNode > inline vector < PNode > getPNodes(DerivedNode inputs[], int size, int start, int length) { int end, tmp_end = start + length; if (tmp_end > size) end = size; else end = tmp_end; //if (size >= 0 && size < usedSize) usedSize = size; vector < PNode > pnodes; for (int idx = start; idx < end; idx++) { pnodes.push_back(&(inputs[idx])); } return pnodes; } #endif
GB_binop__pow_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue 
; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
gesummv.c
/** * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <omp.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include "../../common/polybenchUtilFuncts.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU 1 /* Problem size */ #define N 8192 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y) { for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void gesummv_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y) { #pragma omp target device(GPU) map(to : A[:N * N], B[:N * N], x[:N]) \ map(from : y[:N]) #pragma omp parallel for for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE)i) / N; for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE)i * j) / N; B[i * N + j] = ((DATA_TYPE)i * j) / N; } } } void compareResults(DATA_TYPE *y, DATA_TYPE *y_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < (N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > 
PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; A = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); x = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n"); init(A, B, x); t_start = rtclock(); gesummv_OMP(A, B, x, y_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); init(A, B, x); t_start = rtclock(); gesummv(A, B, x, y); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); return 0; }
/** * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <omp.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include "../../common/polybenchUtilFuncts.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU 1 /* Problem size */ #define N 8192 /* * Declared constant values for ALPHA and BETA (same as values in * PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x, DATA_TYPE * y) { for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void gesummv_OMP(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x, DATA_TYPE * y) { map(from: y[:N]) for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void init(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE) i * j) / N; B[i * N + j] = ((DATA_TYPE) i * j) / N; } } } void compareResults(DATA_TYPE * y, DATA_TYPE * y_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < (N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } //Print results printf("Non-Matching CPU-GPU Outputs 
Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; A = (DATA_TYPE *) malloc(N * N * sizeof(DATA_TYPE)); B = (DATA_TYPE *) malloc(N * N * sizeof(DATA_TYPE)); x = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); y = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n"); init(A, B, x); t_start = rtclock(); gesummv_OMP(A, B, x, y_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); init(A, B, x); t_start = rtclock(); gesummv(A, B, x, y); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); return 0; }
/** * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <omp.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include "../../common/polybenchUtilFuncts.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU 1 /* Problem size */ #define N 8192 /* * Declared constant values for ALPHA and BETA (same as values in * PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x, DATA_TYPE * y) { for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void gesummv_OMP(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x, DATA_TYPE * y) { #pragma omp target device(GPU) map(to : A[:N * N], B[:N * N], x[:N]) \ map(from : y[:N]) #pragma omp parallel for for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void init(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE) i * j) / N; B[i * N + j] = ((DATA_TYPE) i * j) / N; } } } void compareResults(DATA_TYPE * y, DATA_TYPE * y_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < (N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > 
PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } //Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; A = (DATA_TYPE *) malloc(N * N * sizeof(DATA_TYPE)); B = (DATA_TYPE *) malloc(N * N * sizeof(DATA_TYPE)); x = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); y = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n"); init(A, B, x); t_start = rtclock(); gesummv_OMP(A, B, x, y_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); init(A, B, x); t_start = rtclock(); gesummv(A, B, x, y); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); return 0; }
aux.c
#include "aux.h" int *stacks_list; int *stacks_cnts; int cnt, cnt2; long usecs (){ struct timeval t; gettimeofday(&t,NULL); return t.tv_sec*1000000+t.tv_usec; } void mysleep(double sec){ long s, e; s=0; e=0; s = usecs(); while(((double) e-s)/1000000 < sec) { e = usecs(); } return; } void init_stacks(stack_t **stacks, int n){ int i; *stacks = (stack_t*)malloc(n*sizeof(stack_t)); for(i=0; i<n; i++){ (*stacks)[i].cnt = 0; (*stacks)[i].elems = (int*)malloc(MAXELEMS*sizeof(int)); } stacks_list = (int*)malloc(MAXELEMS*sizeof(int)); stacks_cnts = (int*)malloc(MAXELEMS*sizeof(int)); for(i=0; i<MAXELEMS; i++){ stacks_cnts[i] = 0; stacks_list[i] = rand()%n; } cnt = 0; cnt2 = 0; } void free_stacks(stack_t **stacks, int n){ int i; for(i=0; i<n; i++){ (*stacks)[i].cnt = 0; free((*stacks)[i].elems); } free(*stacks); free(stacks_list); free(stacks_cnts); cnt = 0; } int get_random_stack(){ int c; #pragma omp atomic capture c = cnt++; if(c >= MAXELEMS){ return -1; } else { return stacks_list[c]; } } int process(){ int c; mysleep(0.0001); #pragma omp atomic capture c = cnt2++; return c; } void check_result(stack_t *stacks, int n){ int i, j; int *check; /* for(i=0; i<n; i++){ */ /* for(j=0; j<stacks[i].cnt; j++){ */ /* if(stacks[i].elems[j] != j){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* if(stacks[i].cnt != stacks_cnts[i]){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* for(i=0; i<MAXELEMS; i++) */ /* stacks_cnts[stacks_list[i]]--; */ /* for(i=0; i<n; i++){ */ /* if(stacks_cnts[i] != 0){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ for(i=0; i<n; i++) stacks_cnts[i] = stacks[i].cnt; for(i=0; i<MAXELEMS; i++) stacks_cnts[stacks_list[i]]--; for(i=0; i<n; i++){ if(stacks_cnts[i] != 0){ printf("The result is false\n"); return; } } check = (int*)malloc(MAXELEMS*sizeof(int)); for(i=0; i<MAXELEMS; i++) check[i] = 0; for(i=0; i<n; i++) for(j=0; j<stacks[i].cnt; j++) check[stacks[i].elems[j]] = 1; for(i=0; 
i<MAXELEMS; i++) if(check[i] != 1){ free(check); printf("The result is false\n"); return; } free(check); printf("The result is correct!!!\n"); }
#include "aux.h" int *stacks_list; int *stacks_cnts; int cnt, cnt2; long usecs() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void mysleep(double sec) { long s, e; s = 0; e = 0; s = usecs(); while (((double)e - s) / 1000000 < sec) { e = usecs(); } return; } void init_stacks(stack_t ** stacks, int n) { int i; *stacks = (stack_t *) malloc(n * sizeof(stack_t)); for (i = 0; i < n; i++) { (*stacks)[i].cnt = 0; (*stacks)[i].elems = (int *)malloc(MAXELEMS * sizeof(int)); } stacks_list = (int *)malloc(MAXELEMS * sizeof(int)); stacks_cnts = (int *)malloc(MAXELEMS * sizeof(int)); for (i = 0; i < MAXELEMS; i++) { stacks_cnts[i] = 0; stacks_list[i] = rand() % n; } cnt = 0; cnt2 = 0; } void free_stacks(stack_t ** stacks, int n) { int i; for (i = 0; i < n; i++) { (*stacks)[i].cnt = 0; free((*stacks)[i].elems); } free(*stacks); free(stacks_list); free(stacks_cnts); cnt = 0; } int get_random_stack() { int c; c = cnt++; if (c >= MAXELEMS) { return -1; } else { return stacks_list[c]; } } int process() { int c; mysleep(0.0001); c = cnt2++; return c; } void check_result(stack_t * stacks, int n) { int i, j; int *check; /* for(i=0; i<n; i++){ */ /* for(j=0; j<stacks[i].cnt; j++){ */ /* if(stacks[i].elems[j] != j){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* if(stacks[i].cnt != stacks_cnts[i]){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* for(i=0; i<MAXELEMS; i++) */ /* stacks_cnts[stacks_list[i]]--; */ /* for(i=0; i<n; i++){ */ /* if(stacks_cnts[i] != 0){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ for (i = 0; i < n; i++) stacks_cnts[i] = stacks[i].cnt; for (i = 0; i < MAXELEMS; i++) stacks_cnts[stacks_list[i]]--; for (i = 0; i < n; i++) { if (stacks_cnts[i] != 0) { printf("The result is false\n"); return; } } check = (int *)malloc(MAXELEMS * sizeof(int)); for (i = 0; i < MAXELEMS; i++) check[i] = 0; for (i = 0; i < n; i++) for (j = 0; j < stacks[i].cnt; j++) 
check[stacks[i].elems[j]] = 1; for (i = 0; i < MAXELEMS; i++) if (check[i] != 1) { free(check); printf("The result is false\n"); return; } free(check); printf("The result is correct!!!\n"); }
#include "aux.h" int *stacks_list; int *stacks_cnts; int cnt, cnt2; long usecs() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void mysleep(double sec) { long s, e; s = 0; e = 0; s = usecs(); while (((double)e - s) / 1000000 < sec) { e = usecs(); } return; } void init_stacks(stack_t ** stacks, int n) { int i; *stacks = (stack_t *) malloc(n * sizeof(stack_t)); for (i = 0; i < n; i++) { (*stacks)[i].cnt = 0; (*stacks)[i].elems = (int *)malloc(MAXELEMS * sizeof(int)); } stacks_list = (int *)malloc(MAXELEMS * sizeof(int)); stacks_cnts = (int *)malloc(MAXELEMS * sizeof(int)); for (i = 0; i < MAXELEMS; i++) { stacks_cnts[i] = 0; stacks_list[i] = rand() % n; } cnt = 0; cnt2 = 0; } void free_stacks(stack_t ** stacks, int n) { int i; for (i = 0; i < n; i++) { (*stacks)[i].cnt = 0; free((*stacks)[i].elems); } free(*stacks); free(stacks_list); free(stacks_cnts); cnt = 0; } int get_random_stack() { int c; #pragma omp atomic capture c = cnt++; if (c >= MAXELEMS) { return -1; } else { return stacks_list[c]; } } int process() { int c; mysleep(0.0001); #pragma omp atomic capture c = cnt2++; return c; } void check_result(stack_t * stacks, int n) { int i, j; int *check; /* for(i=0; i<n; i++){ */ /* for(j=0; j<stacks[i].cnt; j++){ */ /* if(stacks[i].elems[j] != j){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* if(stacks[i].cnt != stacks_cnts[i]){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ /* for(i=0; i<MAXELEMS; i++) */ /* stacks_cnts[stacks_list[i]]--; */ /* for(i=0; i<n; i++){ */ /* if(stacks_cnts[i] != 0){ */ /* printf("The result is false\n"); */ /* return; */ /* } */ /* } */ for (i = 0; i < n; i++) stacks_cnts[i] = stacks[i].cnt; for (i = 0; i < MAXELEMS; i++) stacks_cnts[stacks_list[i]]--; for (i = 0; i < n; i++) { if (stacks_cnts[i] != 0) { printf("The result is false\n"); return; } } check = (int *)malloc(MAXELEMS * sizeof(int)); for (i = 0; i < MAXELEMS; i++) check[i] = 0; for (i 
= 0; i < n; i++) for (j = 0; j < stacks[i].cnt; j++) check[stacks[i].elems[j]] = 1; for (i = 0; i < MAXELEMS; i++) if (check[i] != 1) { free(check); printf("The result is false\n"); return; } free(check); printf("The result is correct!!!\n"); }
GB_unaryop__lnot_fp64_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp64_int64 // op(A') function: GB_tran__lnot_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp64_int64 ( double *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp64_int64 // op(A') function: GB_tran__lnot_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp64_int64 ( double *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp64_int64 // op(A') function: GB_tran__lnot_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp64_int64 ( double *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
WaterSurfaceMesh.h
#pragma once #include <Magnum/GL/Buffer.h> #include <Magnum/DefaultFramebuffer.h> #include <Magnum/Image.h> #include <Magnum/ImageView.h> #include <Magnum/Math/Color.h> #include <Magnum/Mesh.h> #include <Magnum/MeshTools/Compile.h> #include <Magnum/MeshTools/CompressIndices.h> #include <Magnum/MeshTools/Interleave.h> #include <Magnum/PixelFormat.h> #include <Magnum/Primitives/Cube.h> #include <Magnum/Primitives/Icosphere.h> #include <Magnum/Primitives/Plane.h> #include <Magnum/Primitives/UVSphere.h> #include <Magnum/GL/Renderer.h> #include <Magnum/SceneGraph/Camera.h> #include <Magnum/SceneGraph/Drawable.h> #include <Magnum/SceneGraph/MatrixTransformation3D.h> #include <Magnum/SceneGraph/Scene.h> #include <Magnum/GL/Shader.h> #include <Magnum/Shaders/Flat.h> #include <Magnum/Shaders/Generic.h> #include <Magnum/Shaders/MeshVisualizer.h> #include <Magnum/Shaders/Phong.h> #include <Magnum/Shaders/VertexColor.h> #include <Magnum/GL/Texture.h> #include <Magnum/GL/TextureFormat.h> #include <Magnum/Trade/MeshData3D.h> #include <iostream> #include "../base/SceneBase3D.h" #include "WaterSurfaceShader.h" #include "../../ProfileBuffer.h" namespace Magnum { class WaterSurfaceMesh : public SceneBase3D::Object3D, public SceneGraph::Drawable3D { public: struct VertexData { Vector3 position; Math::Vector<DIR_NUM, Float> amplitude; }; public: explicit WaterSurfaceMesh(SceneBase3D::Object3D * parent, SceneGraph::DrawableGroup3D *group, int n); public: template <class Fun> void setVertices(Fun fun) { // std::vector<VertexData> newData = _data; #pragma omp parallel for for (size_t i = 0; i < _data.size(); i++) { fun(i, _data[i]); } bindBuffers(_data); } void loadProfile(WaterWavelets::ProfileBuffer const& profileBuffer); void showTriangulationToggle(); protected: void bindBuffers(std::vector<VertexData> const &data); void bindTexture(); private: void draw(const Matrix4 & transformationMatrix, SceneGraph::Camera3D &camera) override; public: GL::Mesh _mesh; GL::Buffer _vertexBuffer, 
_indexBuffer; bool _showTriangulation = false; Shaders::WaterSurfaceShader _shader; std::vector<VertexData> _data; std::vector<UnsignedInt> _indices; GL::Texture1D _profileTexture; }; } // namespace Magnum
#pragma once #include <Magnum/GL/Buffer.h> #include <Magnum/DefaultFramebuffer.h> #include <Magnum/Image.h> #include <Magnum/ImageView.h> #include <Magnum/Math/Color.h> #include <Magnum/Mesh.h> #include <Magnum/MeshTools/Compile.h> #include <Magnum/MeshTools/CompressIndices.h> #include <Magnum/MeshTools/Interleave.h> #include <Magnum/PixelFormat.h> #include <Magnum/Primitives/Cube.h> #include <Magnum/Primitives/Icosphere.h> #include <Magnum/Primitives/Plane.h> #include <Magnum/Primitives/UVSphere.h> #include <Magnum/GL/Renderer.h> #include <Magnum/SceneGraph/Camera.h> #include <Magnum/SceneGraph/Drawable.h> #include <Magnum/SceneGraph/MatrixTransformation3D.h> #include <Magnum/SceneGraph/Scene.h> #include <Magnum/GL/Shader.h> #include <Magnum/Shaders/Flat.h> #include <Magnum/Shaders/Generic.h> #include <Magnum/Shaders/MeshVisualizer.h> #include <Magnum/Shaders/Phong.h> #include <Magnum/Shaders/VertexColor.h> #include <Magnum/GL/Texture.h> #include <Magnum/GL/TextureFormat.h> #include <Magnum/Trade/MeshData3D.h> #include <iostream> #include "../base/SceneBase3D.h" #include "WaterSurfaceShader.h" #include "../../ProfileBuffer.h" namespace Magnum { class WaterSurfaceMesh:public SceneBase3D::Object3D, public SceneGraph::Drawable3D { public: struct VertexData { Vector3 position; Math: : Vector < DIR_NUM, Float > amplitude; }; public: explicit WaterSurfaceMesh(SceneBase3D: : Object3D * parent, SceneGraph: : DrawableGroup3D * group, int n); public: template < class Fun > void setVertices(Fun fun) { //std::vector < VertexData > newData = _data; for (size_t i = 0; i < _data.size(); i++) { fun(i, _data[i]); } bindBuffers(_data); } void loadProfile(WaterWavelets::ProfileBuffer const &profileBuffer); void showTriangulationToggle(); protected: void bindBuffers(std::vector < VertexData > const &data); void bindTexture(); private: void draw(const Matrix4 & transformationMatrix, SceneGraph::Camera3D & camera)override; public: GL: : Mesh _mesh; GL: : Buffer _vertexBuffer, 
_indexBuffer; bool _showTriangulation = false; Shaders: : WaterSurfaceShader _shader; std: : vector < VertexData > _data; std: : vector < UnsignedInt > _indices; GL: : Texture1D _profileTexture; }; } //namespace Magnum
#pragma once #include <Magnum/GL/Buffer.h> #include <Magnum/DefaultFramebuffer.h> #include <Magnum/Image.h> #include <Magnum/ImageView.h> #include <Magnum/Math/Color.h> #include <Magnum/Mesh.h> #include <Magnum/MeshTools/Compile.h> #include <Magnum/MeshTools/CompressIndices.h> #include <Magnum/MeshTools/Interleave.h> #include <Magnum/PixelFormat.h> #include <Magnum/Primitives/Cube.h> #include <Magnum/Primitives/Icosphere.h> #include <Magnum/Primitives/Plane.h> #include <Magnum/Primitives/UVSphere.h> #include <Magnum/GL/Renderer.h> #include <Magnum/SceneGraph/Camera.h> #include <Magnum/SceneGraph/Drawable.h> #include <Magnum/SceneGraph/MatrixTransformation3D.h> #include <Magnum/SceneGraph/Scene.h> #include <Magnum/GL/Shader.h> #include <Magnum/Shaders/Flat.h> #include <Magnum/Shaders/Generic.h> #include <Magnum/Shaders/MeshVisualizer.h> #include <Magnum/Shaders/Phong.h> #include <Magnum/Shaders/VertexColor.h> #include <Magnum/GL/Texture.h> #include <Magnum/GL/TextureFormat.h> #include <Magnum/Trade/MeshData3D.h> #include <iostream> #include "../base/SceneBase3D.h" #include "WaterSurfaceShader.h" #include "../../ProfileBuffer.h" namespace Magnum { class WaterSurfaceMesh:public SceneBase3D::Object3D, public SceneGraph::Drawable3D { public: struct VertexData { Vector3 position; Math: : Vector < DIR_NUM, Float > amplitude; }; public: explicit WaterSurfaceMesh(SceneBase3D: : Object3D * parent, SceneGraph: : DrawableGroup3D * group, int n); public: template < class Fun > void setVertices(Fun fun) { //std::vector < VertexData > newData = _data; #pragma omp parallel for for (size_t i = 0; i < _data.size(); i++) { fun(i, _data[i]); } bindBuffers(_data); } void loadProfile(WaterWavelets::ProfileBuffer const &profileBuffer); void showTriangulationToggle(); protected: void bindBuffers(std::vector < VertexData > const &data); void bindTexture(); private: void draw(const Matrix4 & transformationMatrix, SceneGraph::Camera3D & camera)override; public: GL: : Mesh _mesh; GL: : Buffer 
_vertexBuffer, _indexBuffer; bool _showTriangulation = false; Shaders: : WaterSurfaceShader _shader; std: : vector < VertexData > _data; std: : vector < UnsignedInt > _indices; GL: : Texture1D _profileTexture; }; } //namespace Magnum
omp_parallel_for_lastprivate.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel for lastprivate directive.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp parallel for lastprivate</ompts:directive> <ompts:dependences>omp parallel for reduction,omp parallel for private</ompts:dependences> <ompts:testcode> #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname>omp_parallel_for_lastprivate</ompts:testcode:functionname>(FILE * logFile){ <ompts:orphan:vars> int sum; int i; int i0; </ompts:orphan:vars> sum =0; i0 = -1; int known_sum; #pragma omp parallel for reduction(+:sum) schedule(static,7) private(i) <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck> <ompts:orphan> for (i = 1; i <= LOOPCOUNT; i++) { sum = sum + i; i0 = i; } /*end of for*/ /* end of parallel*/ </ompts:orphan> known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return ((known_sum == sum) && (i0 == LOOPCOUNT)); } /* end of check_parallel_for_lastprivate */ </ompts:testcode> </ompts:test>
< ompts:test > <ompts:testdescription > Test which checks the omp parallel for lastprivate directive.< /ompts:testdescription > <ompts: ompversion > 2.0 < /ompts:ompversion > <ompts:directive > omp parallel for lastprivate </ompts:directive > <ompts: dependences > omp parallel for reduction ,omp parallel for private </ompts:dependences > <ompts: testcode > #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname > omp_parallel_for_lastprivate < /ompts:testcode:functionname > (FILE * logFile) { <ompts:orphan:vars > int sum; int i; int i0; </ompts: orphan: vars > sum = 0; i0 = -1; int known_sum; <ompts: orphan > for (i = 1; i <= LOOPCOUNT; i++) { sum = sum + i; i0 = i; } /* end of for */ /* end of parallel */ </ompts: orphan > known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return ((known_sum == sum) && (i0 == LOOPCOUNT)); } /* end of check_parallel_for_lastprivate */ </ompts:testcode > </ompts:test >
< ompts:test > <ompts:testdescription > Test which checks the omp parallel for lastprivate directive.< /ompts:testdescription > <ompts: ompversion > 2.0 < /ompts:ompversion > <ompts:directive > omp parallel for lastprivate </ompts:directive > <ompts: dependences > omp parallel for reduction ,omp parallel for private </ompts:dependences > <ompts: testcode > #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname > omp_parallel_for_lastprivate < /ompts:testcode:functionname > (FILE * logFile) { <ompts:orphan:vars > int sum; int i; int i0; </ompts: orphan: vars > sum = 0; i0 = -1; int known_sum; #pragma omp parallel for reduction(+:sum) schedule(static,7) private(i) <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck> <ompts: orphan > for (i = 1; i <= LOOPCOUNT; i++) { sum = sum + i; i0 = i; } /* end of for */ /* end of parallel */ </ompts: orphan > known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return ((known_sum == sum) && (i0 == LOOPCOUNT)); } /* end of check_parallel_for_lastprivate */ </ompts:testcode > </ompts:test >
test-math-vector-sincos.h
/* Wrappers definitions for tests of ABI of vector sincos/sincosf having vector declaration "#pragma omp declare simd notinbranch". Copyright (C) 2016-2017 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #define INIT_VEC_PTRS_LOOP(vec, val, len) \ do \ { \ for (i = 0; i < len; i++) \ { \ vec[i] = &val[i]; \ } \ } \ while (0) /* Wrapper for vector sincos/sincosf compatible with x86_64 and x32 variants of _ZGVbN2vvv_sincos, _ZGVdN4vvv_sincos, _ZGVeN8vvv_sincos; x32 variants of _ZGVbN4vvv_sincosf, _ZGVcN4vvv_sincos, _ZGVdN8vvv_sincosf, _ZGVeN16vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_2(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN], r1_loc[VEC_LEN]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN); \ vector_func (mx, mr, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN); \ TEST_VEC_LOOP (r1_loc, VEC_LEN); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* Wrapper for vector sincos/sincosf compatible with x86_64 variants of _ZGVcN4vvv_sincos, _ZGVeN16vvv_sincosf, _ZGVbN4vvv_sincosf, _ZGVdN8vvv_sincosf, _ZGVcN8vvv_sincosf. 
*/ #define VECTOR_WRAPPER_fFF_3(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/2], r1_loc[VEC_LEN/2]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/2); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/2); \ vector_func (mx, mr, mr, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/2); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/2); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* Wrapper for vector sincosf compatible with x86_64 variant of _ZGVcN8vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_4(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/4], r1_loc[VEC_LEN/4]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/4); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/4); \ vector_func (mx, mr, mr, mr, mr, mr1, mr1, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/4); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/4); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ }
#define INIT_VEC_PTRS_LOOP(vec, val, len) \ do \ { \ for (i = 0; i < len; i++) \ { \ vec[i] = &val[i]; \ } \ } \ while (0) /* * Wrapper for vector sincos/sincosf compatible with x86_64 and x32 variants * of _ZGVbN2vvv_sincos, _ZGVdN4vvv_sincos, _ZGVeN8vvv_sincos; x32 variants * of _ZGVbN4vvv_sincosf, _ZGVcN4vvv_sincos, _ZGVdN8vvv_sincosf, * _ZGVeN16vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_2(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN], r1_loc[VEC_LEN]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN); \ vector_func (mx, mr, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN); \ TEST_VEC_LOOP (r1_loc, VEC_LEN); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* * Wrapper for vector sincos/sincosf compatible with x86_64 variants of * _ZGVcN4vvv_sincos, _ZGVeN16vvv_sincosf, _ZGVbN4vvv_sincosf, * _ZGVdN8vvv_sincosf, _ZGVcN8vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_3(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/2], r1_loc[VEC_LEN/2]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/2); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/2); \ vector_func (mx, mr, mr, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/2); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/2); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* * Wrapper for vector sincosf compatible with x86_64 variant of * _ZGVcN8vvv_sincosf. 
*/ #define VECTOR_WRAPPER_fFF_4(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/4], r1_loc[VEC_LEN/4]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/4); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/4); \ vector_func (mx, mr, mr, mr, mr, mr1, mr1, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/4); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/4); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ }
#define INIT_VEC_PTRS_LOOP(vec, val, len) \ do \ { \ for (i = 0; i < len; i++) \ { \ vec[i] = &val[i]; \ } \ } \ while (0) /* * Wrapper for vector sincos/sincosf compatible with x86_64 and x32 variants * of _ZGVbN2vvv_sincos, _ZGVdN4vvv_sincos, _ZGVeN8vvv_sincos; x32 variants * of _ZGVbN4vvv_sincosf, _ZGVcN4vvv_sincos, _ZGVdN8vvv_sincosf, * _ZGVeN16vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_2(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN], r1_loc[VEC_LEN]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN); \ vector_func (mx, mr, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN); \ TEST_VEC_LOOP (r1_loc, VEC_LEN); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* * Wrapper for vector sincos/sincosf compatible with x86_64 variants of * _ZGVcN4vvv_sincos, _ZGVeN16vvv_sincosf, _ZGVbN4vvv_sincosf, * _ZGVdN8vvv_sincosf, _ZGVcN8vvv_sincosf. */ #define VECTOR_WRAPPER_fFF_3(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/2], r1_loc[VEC_LEN/2]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/2); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/2); \ vector_func (mx, mr, mr, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/2); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/2); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ } /* * Wrapper for vector sincosf compatible with x86_64 variant of * _ZGVcN8vvv_sincosf. 
*/ #define VECTOR_WRAPPER_fFF_4(scalar_func, vector_func) \ extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE, \ VEC_INT_TYPE, VEC_INT_TYPE); \ void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \ { \ int i; \ FLOAT r_loc[VEC_LEN/4], r1_loc[VEC_LEN/4]; \ VEC_TYPE mx; \ VEC_INT_TYPE mr, mr1; \ INIT_VEC_LOOP (mx, x, VEC_LEN); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr), r_loc, VEC_LEN/4); \ INIT_VEC_PTRS_LOOP (((FLOAT **) &mr1), r1_loc, VEC_LEN/4); \ vector_func (mx, mr, mr, mr, mr, mr1, mr1, mr1, mr1); \ TEST_VEC_LOOP (r_loc, VEC_LEN/4); \ TEST_VEC_LOOP (r1_loc, VEC_LEN/4); \ *r = r_loc[0]; \ *r1 = r1_loc[0]; \ return; \ }
general_basis_rep.h
#ifndef _GENERAL_BASIS_REP_H #define _GENERAL_BASIS_REP_H #include <complex> #include <limits> #include <iostream> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "misc.h" #include "openmp.h" namespace basis_general { template<class I,class J,class P=signed char> int general_normalization(general_basis_core<I,P> *B, I s[], J n[], const npy_intp Ns ) { int err = 0; int nt=B->get_nt(); int per_factor=1.0; for(int i=0;i<nt;i++){ per_factor *= B->pers[i]; } const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload #pragma omp parallel for schedule(dynamic,chunk) for(npy_intp i=0;i<Ns;i++){ if(err != 0){ continue; } double norm = B->check_state(s[i]); npy_intp int_norm = norm; // checks if data type is large enough if(!check_nan(norm) && int_norm>0 ){ if( (npy_uintp)(int_norm * per_factor) > std::numeric_limits<J>::max() ){ #pragma omp critical err = 1; } n[i] = (J)norm * per_factor; } else{ n[i] = 0; } } return err; } template<class I,class P=signed char> void general_representative(general_basis_core<I,P> *B, const I s[], I r[], int *g_out_ptr, P *phase_out_ptr, const npy_intp Ns ) { const int nt = B->get_nt(); if(g_out_ptr && phase_out_ptr){ #pragma omp parallel { #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for(npy_intp i=0;i<Ns;i++){ P temp_phase = 1; r[i] = B->ref_state(s[i],&g_out_ptr[i*nt],temp_phase); phase_out_ptr[i] = temp_phase; } } } else if(g_out_ptr){ #pragma omp parallel { #pragma omp parallel for schedule(static) // NOTE: refstate time has a constant workload for(npy_intp i=0;i<Ns;i++){ P temp_phase = 1; r[i] = B->ref_state(s[i],&g_out_ptr[i*nt],temp_phase); } } } else if(phase_out_ptr){ #pragma omp parallel { int g[__GENERAL_BASIS_CORE__max_nt]; #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for(npy_intp i=0;i<Ns;i++){ P temp_phase = 1; r[i] = B->ref_state(s[i],g,temp_phase); phase_out_ptr[i] = temp_phase; 
} } } else{ #pragma omp parallel { int g[__GENERAL_BASIS_CORE__max_nt]; #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for(npy_intp i=0;i<Ns;i++){ P temp_phase = 1; r[i] = B->ref_state(s[i],g,temp_phase); } } } } } #endif
#ifndef _GENERAL_BASIS_REP_H #define _GENERAL_BASIS_REP_H #include <complex> #include <limits> #include <iostream> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "misc.h" #include "openmp.h" namespace basis_general { template < class I, class J, class P = signed char > int general_normalization(general_basis_core < I, P > *B, I s[], J n[], const npy_intp Ns ) {int err = 0; int nt = B->get_nt(); int per_factor = 1.0; for (int i = 0; i < nt; i++) { per_factor *= B->pers[i]; } const npy_intp chunk = std::max(Ns / (100 * omp_get_max_threads()), (npy_intp) 1); //check_state has variable workload for (npy_intp i = 0; i < Ns; i++) { if (err != 0) { continue; } double norm = B->check_state(s[i]); npy_intp int_norm = norm; //checks if data type is large enough if (!check_nan(norm) && int_norm > 0) { if ((npy_uintp) (int_norm * per_factor) > std: : numeric_limits < J >: :max()) { err = 1; } n[i] = (J) norm *per_factor; } else { n[i] = 0; } } return err; } template < class I, class P = signed char > void general_representative(general_basis_core < I, P > *B, const I s[], I r[], int *g_out_ptr, P * phase_out_ptr, const npy_intp Ns ) { const int nt = B->get_nt(); if (g_out_ptr && phase_out_ptr) { for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], &g_out_ptr[i * nt], temp_phase); phase_out_ptr[i] = temp_phase; } } else if (g_out_ptr) { for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], &g_out_ptr[i * nt], temp_phase); } } else if (phase_out_ptr) { int g[__GENERAL_BASIS_CORE__max_nt]; for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], g, temp_phase); phase_out_ptr[i] = temp_phase; } } else { int g[__GENERAL_BASIS_CORE__max_nt]; for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], g, temp_phase); } } } } #endif
#ifndef _GENERAL_BASIS_REP_H #define _GENERAL_BASIS_REP_H #include <complex> #include <limits> #include <iostream> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "misc.h" #include "openmp.h" namespace basis_general { template < class I, class J, class P = signed char > int general_normalization(general_basis_core < I, P > *B, I s[], J n[], const npy_intp Ns ) {int err = 0; int nt = B->get_nt(); int per_factor = 1.0; for (int i = 0; i < nt; i++) { per_factor *= B->pers[i]; } const npy_intp chunk = std::max(Ns / (100 * omp_get_max_threads()), (npy_intp) 1); //check_state has variable workload #pragma omp parallel for schedule(dynamic,chunk) for (npy_intp i = 0; i < Ns; i++) { if (err != 0) { continue; } double norm = B->check_state(s[i]); npy_intp int_norm = norm; //checks if data type is large enough if (!check_nan(norm) && int_norm > 0) { if ((npy_uintp) (int_norm * per_factor) > std: : numeric_limits < J >: :max()) { #pragma omp critical err = 1; } n[i] = (J) norm *per_factor; } else { n[i] = 0; } } return err; } template < class I, class P = signed char > void general_representative(general_basis_core < I, P > *B, const I s[], I r[], int *g_out_ptr, P * phase_out_ptr, const npy_intp Ns ) { const int nt = B->get_nt(); if (g_out_ptr && phase_out_ptr) { #pragma omp parallel { #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], &g_out_ptr[i * nt], temp_phase); phase_out_ptr[i] = temp_phase; } } } else if (g_out_ptr) { #pragma omp parallel { #pragma omp parallel for schedule(static) // NOTE: refstate time has a constant workload for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], &g_out_ptr[i * nt], temp_phase); } } } else if (phase_out_ptr) { #pragma omp parallel { int g[__GENERAL_BASIS_CORE__max_nt]; #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for (npy_intp i = 0; i < Ns; 
i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], g, temp_phase); phase_out_ptr[i] = temp_phase; } } } else { #pragma omp parallel { int g[__GENERAL_BASIS_CORE__max_nt]; #pragma omp for schedule(static) // NOTE: refstate time has a constant workload for (npy_intp i = 0; i < Ns; i++) { P temp_phase = 1; r[i] = B->ref_state(s[i], g, temp_phase); } } } } } #endif
threads.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(4) { #pragma omp atomic x++; } // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[MASTER_ID]] // CHECK: {{^}}[[WORKER_ID1:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID1]] // CHECK: {{^}}[[WORKER_ID1]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID1]] // CHECK: {{^}}[[WORKER_ID2:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID2]] // CHECK: {{^}}[[WORKER_ID2]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID2]] // CHECK: {{^}}[[WORKER_ID3:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID3]] // CHECK: {{^}}[[WORKER_ID3]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID3]] return 0; }
// RUN:%libomp - compile - and - run | %sort - threads | FileCheck % s // REQUIRES:ompt #include "callback.h" #include <omp.h> int main() { int x = 0; x++; //CHECK: 0: NULL_POINTER =[[NULL:.* $]] // CHECK:{ { ^ } }[[MASTER_ID: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_initial = 1, thread_id =[[MASTER_ID]] // CHECK:{ { ^ } }[[MASTER_ID]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[MASTER_ID]] // CHECK:{ { ^ } }[[WORKER_ID1: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID1]] // CHECK:{ { ^ } }[[WORKER_ID1]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID1]] // CHECK:{ { ^ } }[[WORKER_ID2: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID2]] // CHECK:{ { ^ } }[[WORKER_ID2]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID2]] // CHECK:{ { ^ } }[[WORKER_ID3: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID3]] // CHECK:{ { ^ } }[[WORKER_ID3]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID3]] return 0; }
// RUN:%libomp - compile - and - run | %sort - threads | FileCheck % s // REQUIRES:ompt #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(4) { #pragma omp atomic x++; } //CHECK: 0: NULL_POINTER =[[NULL:.* $]] // CHECK:{ { ^ } }[[MASTER_ID: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_initial = 1, thread_id =[[MASTER_ID]] // CHECK:{ { ^ } }[[MASTER_ID]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[MASTER_ID]] // CHECK:{ { ^ } }[[WORKER_ID1: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID1]] // CHECK:{ { ^ } }[[WORKER_ID1]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID1]] // CHECK:{ { ^ } }[[WORKER_ID2: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID2]] // CHECK:{ { ^ } }[[WORKER_ID2]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID2]] // CHECK:{ { ^ } }[[WORKER_ID3: [0 - 9] +]]: ompt_event_thread_begin: //CHECK - SAME:thread_type = ompt_thread_worker = 2, thread_id =[[WORKER_ID3]] // CHECK:{ { ^ } }[[WORKER_ID3]]: ompt_event_thread_end: //CHECK - SAME:thread_id =[[WORKER_ID3]] return 0; }
fprintf.c
#include <stdio.h> #include <omp.h> void write_index(int*a, int N, FILE* fileptr ){ printf(" ===> Encounter target pragma fileptr:%p \n", fileptr); // fileptr is host pointer, but set is_device_ptr so openmp does not map it. #pragma omp target teams distribute parallel for map(tofrom: a[0:N]) is_device_ptr(fileptr) for(int i=0;i<N;i++) { fprintf(fileptr, "fprintf: updating a[%d] addr:%p file ptr:%p\n",i,&a[i], fileptr); a[i]=i; } fprintf(stderr,"Check stderr is ok after target map stderr:%p \n",stderr); } int main(){ const int N = 10; int a[N],validate[N]; for(int i=0;i<N;i++) { a[i]=0; validate[i]=i; } // 1stpass checks writing to stderr write_index(a,N,stderr); int flag=-1; // Mark Success for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS1 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS1 writing to stderr: Success\n"); } else { printf("PASS1 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } // 2nd pass checks writing to open file pointer FILE* fileptr = fopen("gpu.log", "w"); write_index(a,N,fileptr); fclose(fileptr); flag=-1; for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS2 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS2 writing to open file: Success\n"); return 0; } else { printf("PASS2 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } }
#include <stdio.h> #include <omp.h> void write_index(int*a, int N, FILE* fileptr ){ printf(" ===> Encounter target pragma fileptr:%p \n", fileptr); // fileptr is host pointer, but set is_device_ptr so openmp does not map it. for(int i=0;i<N;i++) { fprintf(fileptr, "fprintf: updating a[%d] addr:%p file ptr:%p\n",i,&a[i], fileptr); a[i]=i; } fprintf(stderr,"Check stderr is ok after target map stderr:%p \n",stderr); } int main(){ const int N = 10; int a[N],validate[N]; for(int i=0;i<N;i++) { a[i]=0; validate[i]=i; } // 1stpass checks writing to stderr write_index(a,N,stderr); int flag=-1; // Mark Success for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS1 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS1 writing to stderr: Success\n"); } else { printf("PASS1 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } // 2nd pass checks writing to open file pointer FILE* fileptr = fopen("gpu.log", "w"); write_index(a,N,fileptr); fclose(fileptr); flag=-1; for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS2 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS2 writing to open file: Success\n"); return 0; } else { printf("PASS2 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } }
#include <stdio.h> #include <omp.h> void write_index(int*a, int N, FILE* fileptr ){ printf(" ===> Encounter target pragma fileptr:%p \n", fileptr); // fileptr is host pointer, but set is_device_ptr so openmp does not map it. #pragma omp target teams distribute parallel for map(tofrom: a[0:N]) is_device_ptr(fileptr) for(int i=0;i<N;i++) { fprintf(fileptr, "fprintf: updating a[%d] addr:%p file ptr:%p\n",i,&a[i], fileptr); a[i]=i; } fprintf(stderr,"Check stderr is ok after target map stderr:%p \n",stderr); } int main(){ const int N = 10; int a[N],validate[N]; for(int i=0;i<N;i++) { a[i]=0; validate[i]=i; } // 1stpass checks writing to stderr write_index(a,N,stderr); int flag=-1; // Mark Success for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS1 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS1 writing to stderr: Success\n"); } else { printf("PASS1 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } // 2nd pass checks writing to open file pointer FILE* fileptr = fopen("gpu.log", "w"); write_index(a,N,fileptr); fclose(fileptr); flag=-1; for(int i=0;i<N;i++) { if(a[i]!=validate[i]) { // print 1st bad index if( flag == -1 ) printf("PASS2 First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]); flag = i; } a[i]=0; // reset for 2nd test } if( flag == -1 ){ printf("PASS2 writing to open file: Success\n"); return 0; } else { printf("PASS2 Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]); printf("Fail\n"); return 1; } }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) { for (t4=max(max(ceild(t1-254,256),ceild(8*t2-Nz-1011,1024)),ceild(8*t3-Ny-1011,1024));t4<=min(min(floord(4*Nt+Nx-9,1024),floord(4*t1+Nx-1,1024)),floord(8*t3+Nx-5,1024));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),256*t4+254);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, 
Inc. This file * is part of the GNU C Library. * * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= Nt - 1; t1++) { lbp = ceild(t1 + 1, 2); ubp = min(floord(4 * Nt + Nz - 9, 8), floord(4 * t1 + Nz - 2, 8)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1, 2), ceild(8 * t2 - Nz + 5, 8)); t3 <= min(floord(4 * Nt + Ny - 9, 8), floord(4 * t1 + Ny - 1, 8)); t3++) { for (t4 = max(max(ceild(t1 - 254, 256), ceild(8 * t2 - Nz - 1011, 1024)), ceild(8 * t3 - Ny - 1011, 1024)); t4 <= min(min(floord(4 * Nt + Nx - 9, 1024), floord(4 * t1 + Nx - 1, 1024)), floord(8 * t3 + Nx - 5, 1024)); t4++) { for (t5 = max(max(max(max(0, ceild(8 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), t1); t5 <= min(min(min(2 * t3, Nt - 1), t1 + 1), 256 * t4 + 254); t5++) { for (t6 = max(max(8 * t2, 4 * t5 + 4), -8 * t1 + 8 * t2 + 8 * t5 - 7); t6 <= min(min(8 * t2 + 7, -8 * t1 + 8 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + 
t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 
TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 
0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. * * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= Nt - 1; t1++) { lbp = ceild(t1 + 1, 2); ubp = min(floord(4 * Nt + Nz - 9, 8), floord(4 * t1 + Nz - 2, 8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1, 2), ceild(8 * t2 - Nz + 5, 8)); t3 <= min(floord(4 * Nt + Ny - 9, 8), floord(4 * t1 + Ny - 1, 8)); t3++) { for (t4 = max(max(ceild(t1 - 254, 256), ceild(8 * t2 - Nz - 1011, 1024)), ceild(8 * t3 - Ny - 1011, 1024)); t4 <= min(min(floord(4 * Nt + Nx - 9, 1024), floord(4 * t1 + Nx - 1, 1024)), floord(8 * t3 + Nx - 5, 1024)); t4++) { for (t5 = max(max(max(max(0, ceild(8 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), t1); t5 <= min(min(min(2 * t3, Nt - 1), t1 + 1), 256 * t4 + 254); t5++) { for (t6 = max(max(8 * t2, 4 * t5 + 4), -8 * t1 + 8 * t2 + 8 * t5 - 7); t6 <= min(min(8 * t2 + 7, -8 * t1 + 8 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * 
t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); 
min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
maxpool_with_mask.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. /* * Highly specialized code, only works for TP3 L1 */ #pragma once #include "core/common/common.h" #include "core/framework/op_kernel.h" #include "core/framework/tensor.h" #include "core/providers/cpu/nn/pool_base.h" #include "core/platform/threadpool.h" namespace onnxruntime { namespace contrib { template <typename T> struct MaxpoolWithMask1DTask final { const T* X_data; const int32_t* M_data; T* Y_data; int64_t x_step; int64_t y_step; int64_t pooled_height; int64_t stride_h; int64_t height; int64_t total_mask_channels; const std::vector<int64_t>& kernel_shape; const std::vector<int64_t>& pads; TensorOpCost Cost() { double loop_count = static_cast<double>(pooled_height * kernel_shape[0]); return TensorOpCost{loop_count, loop_count, loop_count}; } void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const { #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t c = begin; c < end; ++c) { operator()(c); } } void operator()(std::ptrdiff_t c) const { const T* x_d = X_data + c * x_step; const int32_t* m_d = M_data + (c * x_step) % total_mask_channels; T* y_d = Y_data + c * y_step; for (int64_t ph = 0; ph < pooled_height; ++ph) { int64_t hstart = ph * stride_h - pads[0]; int64_t hend = std::min(hstart + kernel_shape[0], height); hstart = std::max(hstart, static_cast<int64_t>(0)); T Yh = std::numeric_limits<T>::lowest(); for (int64_t h = hstart; h < hend; ++h) { if (h >= 0 && m_d[h] == 0) break; // if mask == 0, stop if (x_d[h] > Yh) { Yh = x_d[h]; } } y_d[ph] = Yh; } } }; template <typename T> struct MaxpoolWithMask2DTask final { const T* X_data; const int32_t* M_data; T* Y_data; int64_t x_step; int64_t y_step; int64_t pooled_height; int64_t pooled_width; int64_t stride_h; int64_t stride_w; int64_t height; int64_t width; int64_t total_mask_channels; const std::vector<int64_t>& kernel_shape; const std::vector<int64_t>& pads; TensorOpCost Cost() { double loop_count = 
static_cast<double>(pooled_height * kernel_shape[0]); return TensorOpCost{loop_count, loop_count, loop_count}; } void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const { #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t c = begin; c < end; ++c) { operator()(c); } } void operator()(std::ptrdiff_t c) const { const T* x_d = X_data + c * x_step; const int32_t* m_d = M_data + (c * x_step) % total_mask_channels; T* y_d = Y_data + c * y_step; for (int64_t ph = 0; ph < pooled_height; ++ph) { int64_t hstart = ph * stride_h - pads[0]; int64_t hend = std::min(hstart + kernel_shape[0], height); hstart = std::max(hstart, static_cast<int64_t>(0)); for (int64_t pw = 0; pw < pooled_width; ++pw) { int64_t wstart = pw * stride_w - pads[1]; int64_t wend = std::min(wstart + kernel_shape[1], width); wstart = std::max(wstart, static_cast<int64_t>(0)); const int64_t pool_index = ph * pooled_width + pw; T Yh = std::numeric_limits<T>::lowest(); for (int64_t h = hstart; h < hend; ++h) { for (int64_t w = wstart; w < wend; ++w) { const int64_t input_index = h * width + w; if (input_index > 0 && m_d[input_index] == 0) break; // if mask == 0, break if (x_d[input_index] > Yh) { Yh = x_d[input_index]; } } } y_d[pool_index] = Yh; } } } }; template <typename T> struct MaxpoolWithMask3DTask final { const T* X_data; const int32_t* M_data; T* Y_data; int64_t x_step; int64_t y_step; int64_t pooled_height; int64_t pooled_width; int64_t pooled_depth; int64_t stride_h; int64_t stride_w; int64_t stride_d; int64_t height; int64_t width; int64_t depth; int64_t total_mask_channels; const std::vector<int64_t>& kernel_shape; const std::vector<int64_t>& pads; TensorOpCost Cost() { double loop_count = static_cast<double>(pooled_height * kernel_shape[0]); return TensorOpCost{loop_count, loop_count, loop_count}; } void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const { #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t c = begin; c < end; ++c) { operator()(c); } } void 
operator()(std::ptrdiff_t c) const { const T* x_d = X_data + c * x_step; const int32_t* m_d = M_data + (c * x_step) % total_mask_channels; T* y_d = Y_data + c * y_step; for (int64_t ph = 0; ph < pooled_height; ++ph) { int64_t hstart = ph * stride_h - pads[0]; int64_t hend = std::min(hstart + kernel_shape[0], height); hstart = std::max(hstart, static_cast<int64_t>(0)); for (int64_t pw = 0; pw < pooled_width; ++pw) { int64_t wstart = pw * stride_w - pads[1]; int64_t wend = std::min(wstart + kernel_shape[1], width); wstart = std::max(wstart, static_cast<int64_t>(0)); for (int64_t pd = 0; pd < pooled_depth; ++pd) { int64_t dstart = pd * stride_d - pads[2]; int64_t dend = std::min(dstart + kernel_shape[2], depth); dstart = std::max(dstart, static_cast<int64_t>(0)); const int64_t pool_index = ph * pooled_width * pooled_depth + pw * pooled_depth + pd; T Yh = std::numeric_limits<T>::lowest(); for (int64_t h = hstart; h < hend; ++h) { for (int64_t w = wstart; w < wend; ++w) { for (int64_t d = dstart; d < dend; ++d) { const int64_t input_index = h * width * depth + w * depth + d; if (input_index > 0 && m_d[input_index] == 0) break; // if mask == 0, break if (x_d[input_index] > Yh) { Yh = x_d[input_index]; } } } } y_d[pool_index] = Yh; } } } } }; template <typename T> inline static void RunMaxpoolLoop(concurrency::ThreadPool* tp, std::ptrdiff_t total_channels, T&& task) { #ifdef _OPENMP ORT_UNUSED_PARAMETER(tp); task(0, total_channels); #else concurrency::ThreadPool::TryParallelFor(tp, total_channels, task.Cost(), task); #endif } class MaxpoolWithMask : public OpKernel, public PoolBase { public: MaxpoolWithMask(const OpKernelInfo& info) : OpKernel(info), PoolBase(info) { } Status Compute(OpKernelContext* context) const override { concurrency::ThreadPool* tp = context->GetOperatorThreadPool(); const Tensor* X = context->Input<Tensor>(0); const Tensor* M = context->Input<Tensor>(1); const TensorShape& x_shape = X->Shape(); const TensorShape& m_shape = M->Shape(); 
ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3."); // TODO: fix this checker later // ONNXRUNTIME_RETURN_IF_NOT((x_shape[2] == m_shape[2]) && (x_shape[3] == m_shape[3]), " Input shape and mask shape // mismatch: ", x_shape, " vs ", m_shape); std::vector<int64_t> pads = pool_attrs_.pads; std::vector<int64_t> kernel_shape = pool_attrs_.kernel_shape; std::vector<int64_t> output_dims = pool_attrs_.SetOutputSize(x_shape, x_shape[1], &pads); Tensor* Y = context->Output(0, TensorShape(output_dims)); const float* X_data = X->template Data<float>(); const int32_t* M_data = M->template Data<int32_t>(); float* Y_data = Y->template MutableData<float>(); // The main loop int64_t channels = x_shape[1]; int64_t height = x_shape[2]; int64_t width = kernel_shape.size() > 1 ? x_shape[3] : 1; int64_t depth = kernel_shape.size() > 2 ? x_shape[4] : 1; int64_t pooled_height = output_dims[2]; int64_t pooled_width = kernel_shape.size() > 1 ? output_dims[3] : 1; int64_t pooled_depth = kernel_shape.size() > 2 ? 
output_dims[4] : 1; switch (kernel_shape.size()) { case 1: { int64_t x_step = height; int64_t y_step = pooled_height; const int64_t total_channels = x_shape[0] * channels; const int64_t total_mask_channels = m_shape[0] * m_shape[1]; RunMaxpoolLoop<MaxpoolWithMask1DTask<float>>(tp, total_channels, {X_data, M_data, Y_data, x_step, y_step, pooled_height, stride_h(), height, total_mask_channels, kernel_shape, pads}); break; } case 2: { int64_t x_step = height * width; int64_t y_step = pooled_height * pooled_width; const int64_t total_channels = x_shape[0] * channels; const int64_t total_mask_channels = m_shape[0] * m_shape[1]; RunMaxpoolLoop<MaxpoolWithMask2DTask<float>>( tp, total_channels, {X_data, M_data, Y_data, x_step, y_step, pooled_height, pooled_width, stride_h(), stride_w(), height, width, total_mask_channels, kernel_shape, pads}); break; } case 3: { int64_t x_step = height * width * depth; int64_t y_step = pooled_height * pooled_width * pooled_depth; const int64_t total_channels = x_shape[0] * channels; const int64_t total_mask_channels = m_shape[0] * m_shape[1]; RunMaxpoolLoop<MaxpoolWithMask3DTask<float>>( tp, total_channels, {X_data, M_data, Y_data, x_step, y_step, pooled_height, pooled_width, pooled_depth, stride_h(), stride_w(), stride_d(), height, width, depth, total_mask_channels, kernel_shape, pads}); break; } default: return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "Unsupported pooling size : "); } return Status::OK(); } }; } // namespace contrib } // namespace onnxruntime
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT rowC = mat->rows; ALPHA_INT colC = columns; ALPHA_INT num_threads = alpha_get_thread_num(); for (ALPHA_INT r = 0; r < rowC; ++r) { for (ALPHA_INT c = 0; c < colC; ++c) { alpha_mule(y[index2(r, c, ldy)], beta); alpha_madde(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT rowC = mat->rows; ALPHA_INT colC = columns; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT r = 0; r < rowC; ++r) { for (ALPHA_INT c = 0; c < colC; ++c) { alpha_mule(y[index2(r, c, ldy)], beta); alpha_madde(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
GB_unop__isnan_bool_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isnan_bool_fc32 // op(A') function: GB_unop_tran__isnan_bool_fc32 // C type: bool // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = GB_cisnanf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisnanf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = GB_cisnanf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isnan_bool_fc32 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isnan_bool_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isnan_bool_fc32 // op(A') function: GB_unop_tran__isnan_bool_fc32 // C type: bool // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = GB_cisnanf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisnanf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = GB_cisnanf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isnan_bool_fc32 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isnan_bool_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isnan_bool_fc32 // op(A') function: GB_unop_tran__isnan_bool_fc32 // C type: bool // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = GB_cisnanf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisnanf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = GB_cisnanf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isnan_bool_fc32 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisnanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isnan_bool_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
gen2DTorus.c
#include "defs.h" #define PARALLEL_SDG /* Set this variable to zero to run the data generator on one thread (for debugging purposes) */ double gen2DTorus(graphSDG* SDGdata) { VERT_T *src, *dest; WEIGHT_T *wt; #ifdef _OPENMP omp_lock_t* vLock; #endif double elapsed_time; int seed; elapsed_time = get_seconds(); /* allocate memory for edge tuples */ src = (VERT_T *) malloc(M*sizeof(VERT_T)); dest = (VERT_T *) malloc(M*sizeof(VERT_T)); wt = (WEIGHT_T *) malloc(M*sizeof(WEIGHT_T)); assert(src != NULL); assert(dest != NULL); assert(wt != NULL); /* sprng seed */ seed = 2387; #ifdef _OPENMP #ifdef PARALLEL_SDG omp_set_num_threads(omp_get_max_threads()); // omp_set_num_threads(16); #else omp_set_num_threads(1); #endif #endif #ifdef _OPENMP #pragma omp parallel { #endif int tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif int *stream; LONG_T n, m; LONG_T i, j, x, y; LONG_T x_start, x_end, offset; LONG_T count; #ifdef _OPENMP nthreads = omp_get_num_threads(); tid = omp_get_thread_num(); #else nthreads = 1; tid = 0; #endif /* Initialize RNG stream */ stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT); #ifdef DIAGNOSTIC if (tid == 0) elapsed_time_part = get_seconds(); #endif n = N; m = M; if (SCALE % 2 == 0) { x = 1<<(SCALE/2); y = 1<<(SCALE/2); } else { x = 1<<((SCALE+1)/2); y = 1<<((SCALE-1)/2); } count = 0; x_start = (x/nthreads)*tid; x_end = (x/nthreads)*(tid+1); if (tid == 0) x_start = 0; if (tid == nthreads-1) x_end = x; offset = 4*x_start*y; fprintf(stderr, "tid: %d, x_start: %d, x_end: %d, offset: %d\n", tid, x_start, x_end, offset); // if (tid == 0) { for (i = x_start; i < x_end; i++) { for (j = 0; j < y; j++) { /* go down */ if (j > 0) { src[offset+count] = y*i + j; dest[offset+count] = y*i + j - 1; } else { src[offset+count] = y*i + j; dest[offset+count] = y*i + y - 1; } count++; /* go up */ if (j < y-1) { src[offset+count] = y*i + j; dest[offset+count] = y*i + j + 1; } else { src[offset+count] = y*i + j; dest[offset+count] = y*i; } count++; 
/* go left */ if (i > 0) { src[offset+count] = y*i + j; dest[offset+count] = y*(i-1) + j; } else { src[offset+count] = y*i + j; dest[offset+count] = y*(x-1) + j; } count++; /* go right */ if (i < x-1) { src[offset+count] = y*i + j; dest[offset+count] = y*(i+1) + j; } else { src[offset+count] = y*i + j; dest[offset+count] = j; } count++; } } // } #ifdef _OPENMP #pragma omp barrier #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() -elapsed_time_part; fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i=0; i<m; i++) { wt[i] = 1 + MaxIntWeight * sprng(stream); } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif SDGdata->n = n; SDGdata->m = m; SDGdata->startVertex = src; SDGdata->endVertex = dest; SDGdata->weight = wt; #ifdef _OPENMP } #endif elapsed_time = get_seconds() - elapsed_time; return elapsed_time; }
#include "defs.h" #define PARALLEL_SDG /* * Set this variable to zero to run the data generator on one thread (for * debugging purposes) */ double gen2DTorus(graphSDG * SDGdata) { VERT_T *src, *dest; WEIGHT_T *wt; double elapsed_time; int seed; elapsed_time = get_seconds(); /* allocate memory for edge tuples */ src = (VERT_T *) malloc(M * sizeof(VERT_T)); dest = (VERT_T *) malloc(M * sizeof(VERT_T)); wt = (WEIGHT_T *) malloc(M * sizeof(WEIGHT_T)); assert(src != NULL); assert(dest != NULL); assert(wt != NULL); /* sprng seed */ seed = 2387; int tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif int *stream; LONG_T n, m; LONG_T i, j, x, y; LONG_T x_start, x_end, offset; LONG_T count; /* Initialize RNG stream */ stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT); #ifdef DIAGNOSTIC if (tid == 0) elapsed_time_part = get_seconds(); #endif n = N; m = M; if (SCALE % 2 == 0) { x = 1 << (SCALE / 2); y = 1 << (SCALE / 2); } else { x = 1 << ((SCALE + 1) / 2); y = 1 << ((SCALE - 1) / 2); } count = 0; x_start = (x / nthreads) * tid; x_end = (x / nthreads) * (tid + 1); if (tid == 0) x_start = 0; if (tid == nthreads - 1) x_end = x; offset = 4 * x_start * y; fprintf(stderr, "tid: %d, x_start: %d, x_end: %d, offset: %d\n", tid, x_start, x_end, offset); //if (tid == 0) { for (i = x_start; i < x_end; i++) { for (j = 0; j < y; j++) { /* go down */ if (j > 0) { src[offset + count] = y * i + j; dest[offset + count] = y * i + j - 1; } else { src[offset + count] = y * i + j; dest[offset + count] = y * i + y - 1; } count++; /* go up */ if (j < y - 1) { src[offset + count] = y * i + j; dest[offset + count] = y * i + j + 1; } else { src[offset + count] = y * i + j; dest[offset + count] = y * i; } count++; /* go left */ if (i > 0) { src[offset + count] = y * i + j; dest[offset + count] = y * (i - 1) + j; } else { src[offset + count] = y * i + j; dest[offset + count] = y * (x - 1) + j; } count++; /* go right */ if (i < x - 1) { src[offset + count] = y * i + j; dest[offset 
+ count] = y * (i + 1) + j; } else { src[offset + count] = y * i + j; dest[offset + count] = j; } count++; } } // } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif for (i = 0; i < m; i++) { wt[i] = 1 + MaxIntWeight * sprng(stream); } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif SDGdata->n = n; SDGdata->m = m; SDGdata->startVertex = src; SDGdata->endVertex = dest; SDGdata->weight = wt; elapsed_time = get_seconds() - elapsed_time; return elapsed_time; }
#include "defs.h" #define PARALLEL_SDG /* * Set this variable to zero to run the data generator on one thread (for * debugging purposes) */ double gen2DTorus(graphSDG * SDGdata) { VERT_T *src, *dest; WEIGHT_T *wt; #ifdef _OPENMP omp_lock_t *vLock; #endif double elapsed_time; int seed; elapsed_time = get_seconds(); /* allocate memory for edge tuples */ src = (VERT_T *) malloc(M * sizeof(VERT_T)); dest = (VERT_T *) malloc(M * sizeof(VERT_T)); wt = (WEIGHT_T *) malloc(M * sizeof(WEIGHT_T)); assert(src != NULL); assert(dest != NULL); assert(wt != NULL); /* sprng seed */ seed = 2387; #ifdef _OPENMP #ifdef PARALLEL_SDG omp_set_num_threads(omp_get_max_threads()); //omp_set_num_threads(16); #else omp_set_num_threads(1); #endif #endif #ifdef _OPENMP #pragma omp parallel { #endif int tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif int *stream; LONG_T n, m; LONG_T i, j, x, y; LONG_T x_start, x_end, offset; LONG_T count; #ifdef _OPENMP nthreads = omp_get_num_threads(); tid = omp_get_thread_num(); #else nthreads = 1; tid = 0; #endif /* Initialize RNG stream */ stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT); #ifdef DIAGNOSTIC if (tid == 0) elapsed_time_part = get_seconds(); #endif n = N; m = M; if (SCALE % 2 == 0) { x = 1 << (SCALE / 2); y = 1 << (SCALE / 2); } else { x = 1 << ((SCALE + 1) / 2); y = 1 << ((SCALE - 1) / 2); } count = 0; x_start = (x / nthreads) * tid; x_end = (x / nthreads) * (tid + 1); if (tid == 0) x_start = 0; if (tid == nthreads - 1) x_end = x; offset = 4 * x_start * y; fprintf(stderr, "tid: %d, x_start: %d, x_end: %d, offset: %d\n", tid, x_start, x_end, offset); //if (tid == 0) { for (i = x_start; i < x_end; i++) { for (j = 0; j < y; j++) { /* go down */ if (j > 0) { src[offset + count] = y * i + j; dest[offset + count] = y * i + j - 1; } else { src[offset + count] = y * i + j; dest[offset + count] = y * i + y - 1; } count++; /* go up */ if (j < y - 1) { src[offset + count] = y * i + j; dest[offset + count] = y * i + j + 1; } 
else { src[offset + count] = y * i + j; dest[offset + count] = y * i; } count++; /* go left */ if (i > 0) { src[offset + count] = y * i + j; dest[offset + count] = y * (i - 1) + j; } else { src[offset + count] = y * i + j; dest[offset + count] = y * (x - 1) + j; } count++; /* go right */ if (i < x - 1) { src[offset + count] = y * i + j; dest[offset + count] = y * (i + 1) + j; } else { src[offset + count] = y * i + j; dest[offset + count] = j; } count++; } } // } #ifdef _OPENMP #pragma omp barrier #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i = 0; i < m; i++) { wt[i] = 1 + MaxIntWeight * sprng(stream); } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif SDGdata->n = n; SDGdata->m = m; SDGdata->startVertex = src; SDGdata->endVertex = dest; SDGdata->weight = wt; #ifdef _OPENMP } #endif elapsed_time = get_seconds() - elapsed_time; return elapsed_time; }
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2019 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* Record whether a type or decl was written with nonconstant size. 
Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. 
*/ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) \ DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !fndecl_built_in_p (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. 
The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start () const { return src_range.m_start; } location_t get_finish () const { return src_range.m_finish; } location_t get_location () const { if (EXPR_HAS_LOCATION (value)) return EXPR_LOCATION (value); else return make_location (get_start (), get_start (), get_finish ()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error () { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). 
*/ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. 
*/, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. */ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. 
*/ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3; ENUM_BITFIELD (c_declspec_il) declspec_il : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was speciied. 
*/ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc> *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. 
*/ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); extern bool c_keyword_starts_typename (enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p (void); extern tree pushdecl (tree); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield (location_t, struct c_declarator 
*, struct c_declspecs *, tree, tree *); extern tree groktypename (struct c_type_name *, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern bool start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type 
(location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (location_t, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (location_t, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); extern alias_set_type c_get_alias_set (tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (location_t, tree); extern bool same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree, bool = false); extern void c_incomplete_type_error (location_t, const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1 (tree, bool); extern void mark_exp_read (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree, location_t); extern tree build_array_ref (location_t, tree, tree); 
/* Continuation of the c-typeck.c declarations begun above: expression,
   initializer, statement and OpenMP/OpenACC construct handling.  */

extern tree build_external_ref (location_t, tree, bool, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
					    struct c_expr);
extern struct c_expr parser_build_binary_op (location_t, enum tree_code,
					     struct c_expr, struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
				    location_t, tree, tree, location_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);

/* Initializer processing.  */
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
				     location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
				  struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool,
				    unsigned int);
extern void check_compound_literal_type (location_t, struct c_type_name *);

/* Statement handling.  */
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
			    bool);
extern tree build_asm_stmt (bool, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, location_t, tree, location_t, tree,
			   tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);

/* OpenMP / OpenACC / transaction construct handling.  */
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
				       vec<tree, va_gc> *,
				       vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
extern int current_function_returns_value;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
extern int current_function_returns_null;

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
extern int current_function_returns_abnormally;

/* In c-decl.c */

/* Tell the binding oracle what kind of binding we are looking for.  */
enum c_oracle_request
{
  C_ORACLE_SYMBOL,
  C_ORACLE_TAG,
  C_ORACLE_LABEL
};

/* If this is non-NULL, then it is a "binding oracle" which can lazily
   create bindings when needed by the C compiler.  The oracle is told
   the name and type of the binding to create.  It can call pushdecl
   or the like to ensure the binding is visible; or do nothing,
   leaving the binding untouched.  c-decl.c takes note of when the
   oracle has been called and will not call it again if it fails to
   create a given binding.  */

typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);

extern c_binding_oracle_function *c_binding_oracle;

extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);

/* In c-errors.c */
/* Standard-version-dependent pedantic diagnostics; each returns bool.
   NOTE(review): the exact per-standard warn/pedwarn behavior is defined
   in c-errors.c -- confirm there.  */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c11 (location_t, int opt, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);

/* Set the source range of EXPR, from explicit start/finish locations
   or from an existing source_range (two overloads).  */
extern void set_c_expr_source_range (c_expr *expr,
				     location_t start, location_t finish);
extern void set_c_expr_source_range (c_expr *expr,
				     source_range src_range);

/* In c-fold.c */
extern vec<tree> incomplete_record_decls;

#if CHECKING_P
namespace selftest {
  extern void run_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */

#endif /* ! GCC_C_TREE_H */
#ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* * struct lang_identifier is private to c-decl.c, but langhooks.c needs to * know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* * In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition * of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* * In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable * declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* * In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. * C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* * Record whether a type or decl was written with nonconstant size. Note that * TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* * Record whether a type is defined inside a struct or union type. This is * used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* * For a FUNCTION_DECL, nonzero if it was defined without an explicit return * type. 
*/ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* * For FUNCTION_DECLs, evaluates true if the decl is built-in but has been * declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* * For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in * prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* * Record whether a decl was declared register. This is strictly a front-end * flag, whereas DECL_REGISTER is used for code generation; they may differ * for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* * Record whether a decl was used in an expression anywhere except an * unevaluated operand of sizeof / typeof / alignof. This is only used for * functions declared static but not defined, though outside sizeof and * typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* * Record whether a variable has been declared threadprivate by #define * C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) * /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) \ DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL)) /* * Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. * Could be simplified if all built-in decls had complete prototypes (but * this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !fndecl_built_in_p (EXP))) /* * For FUNCTION_TYPE, a hidden list of types of arguments. 
The same as * TYPE_ARG_TYPES for functions with prototypes, but created for functions * without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* * For a CONSTRUCTOR, whether some initializer contains a subexpression * meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* * For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been * folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) /* * Record parser information about an expression that is irrelevant for code * generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* * Record the original unary/binary operator of an expression, which may * have been changed by fold, STRING_CST for unparenthesized string * constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if * parenthesized), for subexpressions, and for non-constant initializers, * or ERROR_MARK for other expressions (including parenthesized * expressions). */ enum tree_code original_code; /* * If not NULL, the original type of an expression. This will differ * from the type of the value field for an enum constant. The type of an * enum constant is a plain integer type, but this field will be the enum * type. */ tree original_type; /* * The source range of this expression. This is redundant for node * values that have locations, but not all node kinds have locations * (e.g. constants, and references to params, locals, etc), so we stash a * copy here. */ source_range src_range; /* * Access to the first and last locations within the source spelling of * this expression. 
*/ location_t get_start() const { return src_range.m_start; } location_t get_finish() const { return src_range.m_finish; } location_t get_location() const { if (EXPR_HAS_LOCATION(value)) return EXPR_LOCATION(value); else return make_location(get_start(), get_start(), get_finish()); } /* * Set the value to error_mark_node whilst ensuring that src_range is * initialized. */ void set_error() { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* * Type alias for struct c_expr. This allows to use the structure inside the * VEC types. */ typedef struct c_expr c_expr_t; /* * A kind of type specifier. Note that this information is currently only * used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* * A reference to a tag, previously declared, such as "struct foo". This * includes where the previous declaration was as a different kind of * tag, in which case this is only valid if shadowing that tag in an * inner scope. */ ctsk_tagref, /* * A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* * A type specifier: this structure is created in the parser and passed to * declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* * Whether the expression has operands suitable for use in constant * expressions. */ bool expr_const_operands; /* The specifier itself. 
*/ tree spec; /* * An expression to be evaluated before the type specifier, in the case * of typeof specifiers, or NULL otherwise or if no such expression is * required for a particular typeof specifier. In particular, when * typeof is applied to an expression of variably modified type, that * expression must be evaluated in order to determine array sizes that * form part of the type, but the expression itself (as opposed to the * array sizes) forms no part of the type and so needs to be recorded * separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* * A type specifier keyword "void", "_Bool", "char", "int", "float", * "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", * or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* * This enum lists all the possible declarator specifiers, storage class or * attribute that a user can write. There is at least one enumerator per * possible declarator specifier in the struct c_declspecs below. * * It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */ , cdw_storage_class /* A catch-all for a storage class */ , cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last * enumerator. 
*/ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* * A sequence of declaration specifiers in C. When a new declaration * specifier is added, please update the enum c_declspec_word above * accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* * The type specified, if a single type specifier such as a struct, union * or enum specifier, typedef name or typeof specifies the whole type, or * NULL_TREE if none or a keyword such as "void" or "char" is used. Does * not include qualifiers. */ tree type; /* * Any expression to be evaluated before the type, from a typeof * specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* * When parsing, the attributes. Outside the parser, this will be NULL; * attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* * The base-2 log of the greatest alignment required by an _Alignas * specifier, in bytes, or -1 if no such specifiers with nonzero * alignment. */ int align_log; /* * For the __intN declspec, this stores the index into the int_n_* * arrays. */ int int_n_idx; /* * For the _FloatN and _FloatNx declspec, this stores the index into the * floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* * Any type specifier keyword used such as "int", not reflecting * modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD(c_typespec_keyword) typespec_word: 8; /* * The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD(c_typespec_kind) typespec_kind: 3; ENUM_BITFIELD(c_declspec_il) declspec_il: 3; /* * Whether any expressions in typeof specifiers may appear in constant * expressions. 
*/ BOOL_BITFIELD expr_const_operands:1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p:1; /* * Whether something other than a storage class specifier or attribute * has been seen. This is used to warn for the obsolescent usage of * storage class specifiers other than at the start of the list. (Doing * this properly would require function specifiers to be handled * separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p:1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p:1; /* * Whether the type is explicitly "signed" or specified by a typedef * whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p:1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p:1; /* * Whether the type defaulted to "int" because there were no type * specifiers. */ BOOL_BITFIELD default_int_p:1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p:1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p:1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p:1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p:1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p:1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p:1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p:1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p:1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p:1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p:1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p:1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p:1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p:1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p:1; /* Whether "_Sat" was specified. 
*/ BOOL_BITFIELD saturating_p:1; /* * Whether any alignment specifier (even with zero alignment) was * specified. */ BOOL_BITFIELD alignas_p:1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec < c_arg_tag, va_gc > *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* * A list of non-parameter decls (notably enumeration constants) defined * with the parameters. */ tree others; /* * A compound expression of VLA sizes from the parameters, or NULL. In a * function definition, these are used to ensure that side-effects in * sizes of arrays converted to pointers (such as a parameter int i[n++]) * take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec:1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* * For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract * declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. 
*/ BOOL_BITFIELD static_p:1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p:1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* * While defining an enum type, this is 1 plus the last enumerator * constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* * A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* * Identifier with internal linkage used in function that may be an * inline definition (i.e., file-scope static). */ csi_internal, /* * Modifiable object with static storage duration defined in function * that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init(void); extern bool c_keyword_starts_typename(enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record(tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p(void); extern tree pushdecl(tree); extern void push_scope(void); extern tree pop_scope(void); extern void c_bindings_start_stmt_expr(struct c_spot_bindings *); extern void c_bindings_end_stmt_expr(struct c_spot_bindings *); extern void record_inline_static(location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing(void); extern void c_print_identifier(FILE *, tree, int); extern int quals_from_declspecs(const struct c_declspecs *); extern struct c_declarator * build_array_declarator(location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator(location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls(location_t, bool); extern void mark_forward_parm_decls(void); extern void declare_parm_level(void); extern void undeclared_variable(location_t, tree); extern tree lookup_label_for_goto(location_t, tree); extern tree declare_label(tree); extern tree define_label(location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings(void); extern void c_release_switch_bindings(struct c_spot_bindings *); extern bool c_check_switch_jump_warnings(struct c_spot_bindings *, location_t, location_t); extern void finish_decl(tree, location_t, tree, tree, tree); extern tree finish_enum(tree, tree, tree); extern void finish_function(void); extern tree finish_struct(location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info(void); extern struct c_arg_info *get_parm_info(bool, tree); extern tree grokfield(location_t, struct c_declarator *, struct c_declspecs *, tree, 
tree *); extern tree groktypename(struct c_type_name *, tree *, bool *); extern tree grokparm(const struct c_parm *, tree *); extern tree implicitly_declare(location_t, tree); extern void keep_next_level(void); extern void pending_xref_error(void); extern void c_push_function_context(void); extern void c_pop_function_context(void); extern void push_parm_decl(const struct c_parm *, tree *); extern struct c_declarator * set_array_declarator_inner(struct c_declarator *, struct c_declarator *); extern tree c_builtin_function(tree); extern tree c_builtin_function_ext_scope(tree); extern void shadow_tag(const struct c_declspecs *); extern void shadow_tag_warned(const struct c_declspecs *, int); extern tree start_enum(location_t, struct c_enum_contents *, tree); extern bool start_function(struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl(struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct(location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls(void); extern void store_parm_decls_from(struct c_arg_info *); extern void temp_store_parm_decls(tree, tree); extern void temp_pop_parm_decls(void); extern tree xref_tag(enum tree_code, tree); extern struct c_typespec parser_xref_tag(location_t, enum tree_code, tree); extern struct c_parm * build_c_parm(struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator * build_attrs_declarator(tree, struct c_declarator *); extern struct c_declarator * build_function_declarator(struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator(tree); extern struct c_declarator * make_pointer_declarator(struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs(void); extern struct c_declspecs * declspecs_add_qual(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_type(location_t, struct c_declspecs *, struct c_typespec); 
extern struct c_declspecs * declspecs_add_scspec(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_attrs(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_addrspace(location_t, struct c_declspecs *, addr_space_t); extern struct c_declspecs * declspecs_add_alignas(location_t, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs(struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init(void); extern bool c_missing_noreturn_ok_p(tree); extern bool c_warn_unused_global_decl(const_tree); extern void c_initialize_diagnostics(diagnostic_context *); extern bool c_vla_unspec_p(tree x, tree fn); extern alias_set_type c_get_alias_set(tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion(location_t, tree); extern tree require_complete_type(location_t, tree); extern bool same_translation_unit_p(const_tree, const_tree); extern int comptypes(tree, tree); extern int comptypes_check_different_types(tree, tree, bool *); extern bool c_vla_type_p(const_tree); extern bool c_mark_addressable(tree, bool = false); extern void c_incomplete_type_error(location_t, const_tree, const_tree); extern tree c_type_promotes_to(tree); extern struct c_expr default_function_array_conversion(location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion(location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue(location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1(tree, bool); extern void mark_exp_read(tree); extern tree composite_type(tree, tree); extern tree build_component_ref(location_t, tree, tree, location_t); extern tree build_array_ref(location_t, tree, tree); extern tree build_external_ref(location_t, tree, bool, tree *); extern void 
pop_maybe_used(bool); extern struct c_expr c_expr_sizeof_expr(location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type(location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op(location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op(location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr(location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr(location_t, tree, tree); extern tree c_cast_expr(location_t, struct c_type_name *, tree); extern tree build_c_cast(location_t, tree, tree); extern void store_init_value(location_t, tree, tree, tree); extern void maybe_warn_string_init(location_t, tree, struct c_expr); extern void start_init(tree, tree, int, rich_location *); extern void finish_init(void); extern void really_start_incremental_init(tree); extern void finish_implicit_inits(location_t, struct obstack *); extern void push_init_level(location_t, int, struct obstack *); extern struct c_expr pop_init_level(location_t, int, struct obstack *, location_t); extern void set_init_index(location_t, tree, tree, struct obstack *); extern void set_init_label(location_t, tree, location_t, struct obstack *); extern void process_init_element(location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal(location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type(location_t, struct c_type_name *); extern tree c_start_case(location_t, location_t, tree, bool); extern void c_finish_case(tree, tree); extern tree build_asm_expr(location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt(bool, tree); extern int c_types_compatible_p(tree, tree); extern tree c_begin_compound_stmt(bool); extern tree c_end_compound_stmt(location_t, tree, bool); extern void c_finish_if_stmt(location_t, tree, tree, tree); extern void c_finish_loop(location_t, location_t, tree, location_t, 
tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr(void); extern tree c_finish_stmt_expr(location_t, tree); extern tree c_process_expr_stmt(location_t, tree); extern tree c_finish_expr_stmt(location_t, tree); extern tree c_finish_return(location_t, tree, tree); extern tree c_finish_bc_stmt(location_t, tree *, bool); extern tree c_finish_goto_label(location_t, tree); extern tree c_finish_goto_ptr(location_t, tree); extern tree c_expr_to_decl(tree, bool *, bool *); extern tree c_finish_omp_construct(location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data(location_t, tree, tree); extern tree c_finish_oacc_host_data(location_t, tree, tree); extern tree c_begin_omp_parallel(void); extern tree c_finish_omp_parallel(location_t, tree, tree); extern tree c_begin_omp_task(void); extern tree c_finish_omp_task(location_t, tree, tree); extern void c_finish_omp_cancel(location_t, tree); extern void c_finish_omp_cancellation_point(location_t, tree); extern tree c_finish_omp_clauses(tree, enum c_omp_region_type); extern tree c_build_va_arg(location_t, tree, location_t, tree); extern tree c_finish_transaction(location_t, tree, int); extern bool c_tree_equal(tree, tree); extern tree c_build_function_call_vec(location_t, vec < location_t >, tree, vec < tree, va_gc > *, vec < tree, va_gc > *); extern tree c_omp_clause_copy_ctor(tree, tree, tree); /* * Set to 0 at beginning of a function definition, set to 1 if a return * statement that specifies a return value is seen. */ extern int current_function_returns_value; /* * Set to 0 at beginning of a function definition, set to 1 if a return * statement with no argument is seen. */ extern int current_function_returns_null; /* * Set to 0 at beginning of a function definition, set to 1 if a call to a * noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. 
*/ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* * If this is non-NULL, then it is a "binding oracle" which can lazily create * bindings when needed by the C compiler. The oracle is told the name and * type of the binding to create. It can call pushdecl or the like to ensure * the binding is visible; or do nothing, leaving the binding untouched. * c-decl.c takes note of when the oracle has been called and will not call * it again if it fails to create a given binding. */ typedef void c_binding_oracle_function(enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl(tree); extern tree c_omp_reduction_id(enum tree_code, tree); extern tree c_omp_reduction_decl(tree); extern tree c_omp_reduction_lookup(tree, tree); extern tree c_check_omp_declare_reduction_r(tree *, int *, void *); extern void c_pushtag(location_t, tree, tree); extern void c_bind(location_t, tree, bool); extern bool tag_exists_p(enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c99(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c11(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern void set_c_expr_source_range(c_expr * expr, location_t start, location_t finish); extern void set_c_expr_source_range(c_expr * expr, source_range src_range); /* In c-fold.c */ extern vec < tree > incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests(void); } //namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
#ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* * struct lang_identifier is private to c-decl.c, but langhooks.c needs to * know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* * In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition * of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* * In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable * declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* * In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. * C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* * Record whether a type or decl was written with nonconstant size. Note that * TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* * Record whether a type is defined inside a struct or union type. This is * used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* * For a FUNCTION_DECL, nonzero if it was defined without an explicit return * type. 
*/ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* * For FUNCTION_DECLs, evaluates true if the decl is built-in but has been * declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* * For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in * prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* * Record whether a decl was declared register. This is strictly a front-end * flag, whereas DECL_REGISTER is used for code generation; they may differ * for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* * Record whether a decl was used in an expression anywhere except an * unevaluated operand of sizeof / typeof / alignof. This is only used for * functions declared static but not defined, though outside sizeof and * typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* * Record whether a variable has been declared threadprivate by #pragma omp * threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) \ DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL)) /* * Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. * Could be simplified if all built-in decls had complete prototypes (but * this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !fndecl_built_in_p (EXP))) /* * For FUNCTION_TYPE, a hidden list of types of arguments. 
The same as * TYPE_ARG_TYPES for functions with prototypes, but created for functions * without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* * For a CONSTRUCTOR, whether some initializer contains a subexpression * meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* * For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been * folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) /* * Record parser information about an expression that is irrelevant for code * generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* * Record the original unary/binary operator of an expression, which may * have been changed by fold, STRING_CST for unparenthesized string * constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if * parenthesized), for subexpressions, and for non-constant initializers, * or ERROR_MARK for other expressions (including parenthesized * expressions). */ enum tree_code original_code; /* * If not NULL, the original type of an expression. This will differ * from the type of the value field for an enum constant. The type of an * enum constant is a plain integer type, but this field will be the enum * type. */ tree original_type; /* * The source range of this expression. This is redundant for node * values that have locations, but not all node kinds have locations * (e.g. constants, and references to params, locals, etc), so we stash a * copy here. */ source_range src_range; /* * Access to the first and last locations within the source spelling of * this expression. 
*/ location_t get_start() const { return src_range.m_start; } location_t get_finish() const { return src_range.m_finish; } location_t get_location() const { if (EXPR_HAS_LOCATION(value)) return EXPR_LOCATION(value); else return make_location(get_start(), get_start(), get_finish()); } /* * Set the value to error_mark_node whilst ensuring that src_range is * initialized. */ void set_error() { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* * Type alias for struct c_expr. This allows to use the structure inside the * VEC types. */ typedef struct c_expr c_expr_t; /* * A kind of type specifier. Note that this information is currently only * used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* * A reference to a tag, previously declared, such as "struct foo". This * includes where the previous declaration was as a different kind of * tag, in which case this is only valid if shadowing that tag in an * inner scope. */ ctsk_tagref, /* * A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* * A type specifier: this structure is created in the parser and passed to * declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* * Whether the expression has operands suitable for use in constant * expressions. */ bool expr_const_operands; /* The specifier itself. 
*/ tree spec; /* * An expression to be evaluated before the type specifier, in the case * of typeof specifiers, or NULL otherwise or if no such expression is * required for a particular typeof specifier. In particular, when * typeof is applied to an expression of variably modified type, that * expression must be evaluated in order to determine array sizes that * form part of the type, but the expression itself (as opposed to the * array sizes) forms no part of the type and so needs to be recorded * separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* * A type specifier keyword "void", "_Bool", "char", "int", "float", * "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", * or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* * This enum lists all the possible declarator specifiers, storage class or * attribute that a user can write. There is at least one enumerator per * possible declarator specifier in the struct c_declspecs below. * * It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */ , cdw_storage_class /* A catch-all for a storage class */ , cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last * enumerator. 
*/ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* * A sequence of declaration specifiers in C. When a new declaration * specifier is added, please update the enum c_declspec_word above * accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* * The type specified, if a single type specifier such as a struct, union * or enum specifier, typedef name or typeof specifies the whole type, or * NULL_TREE if none or a keyword such as "void" or "char" is used. Does * not include qualifiers. */ tree type; /* * Any expression to be evaluated before the type, from a typeof * specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* * When parsing, the attributes. Outside the parser, this will be NULL; * attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* * The base-2 log of the greatest alignment required by an _Alignas * specifier, in bytes, or -1 if no such specifiers with nonzero * alignment. */ int align_log; /* * For the __intN declspec, this stores the index into the int_n_* * arrays. */ int int_n_idx; /* * For the _FloatN and _FloatNx declspec, this stores the index into the * floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* * Any type specifier keyword used such as "int", not reflecting * modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD(c_typespec_keyword) typespec_word: 8; /* * The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD(c_typespec_kind) typespec_kind: 3; ENUM_BITFIELD(c_declspec_il) declspec_il: 3; /* * Whether any expressions in typeof specifiers may appear in constant * expressions. 
*/ BOOL_BITFIELD expr_const_operands:1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p:1; /* * Whether something other than a storage class specifier or attribute * has been seen. This is used to warn for the obsolescent usage of * storage class specifiers other than at the start of the list. (Doing * this properly would require function specifiers to be handled * separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p:1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p:1; /* * Whether the type is explicitly "signed" or specified by a typedef * whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p:1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p:1; /* * Whether the type defaulted to "int" because there were no type * specifiers. */ BOOL_BITFIELD default_int_p:1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p:1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p:1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p:1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p:1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p:1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p:1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p:1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p:1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p:1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p:1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p:1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p:1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p:1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p:1; /* Whether "_Sat" was specified. 
*/ BOOL_BITFIELD saturating_p:1; /* * Whether any alignment specifier (even with zero alignment) was * specified. */ BOOL_BITFIELD alignas_p:1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec < c_arg_tag, va_gc > *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* * A list of non-parameter decls (notably enumeration constants) defined * with the parameters. */ tree others; /* * A compound expression of VLA sizes from the parameters, or NULL. In a * function definition, these are used to ensure that side-effects in * sizes of arrays converted to pointers (such as a parameter int i[n++]) * take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec:1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* * For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract * declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. 
*/ BOOL_BITFIELD static_p:1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p:1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* * While defining an enum type, this is 1 plus the last enumerator * constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* * A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* * Identifier with internal linkage used in function that may be an * inline definition (i.e., file-scope static). */ csi_internal, /* * Modifiable object with static storage duration defined in function * that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init(void); extern bool c_keyword_starts_typename(enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record(tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p(void); extern tree pushdecl(tree); extern void push_scope(void); extern tree pop_scope(void); extern void c_bindings_start_stmt_expr(struct c_spot_bindings *); extern void c_bindings_end_stmt_expr(struct c_spot_bindings *); extern void record_inline_static(location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing(void); extern void c_print_identifier(FILE *, tree, int); extern int quals_from_declspecs(const struct c_declspecs *); extern struct c_declarator * build_array_declarator(location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator(location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls(location_t, bool); extern void mark_forward_parm_decls(void); extern void declare_parm_level(void); extern void undeclared_variable(location_t, tree); extern tree lookup_label_for_goto(location_t, tree); extern tree declare_label(tree); extern tree define_label(location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings(void); extern void c_release_switch_bindings(struct c_spot_bindings *); extern bool c_check_switch_jump_warnings(struct c_spot_bindings *, location_t, location_t); extern void finish_decl(tree, location_t, tree, tree, tree); extern tree finish_enum(tree, tree, tree); extern void finish_function(void); extern tree finish_struct(location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info(void); extern struct c_arg_info *get_parm_info(bool, tree); extern tree grokfield(location_t, struct c_declarator *, struct c_declspecs *, tree, 
tree *); extern tree groktypename(struct c_type_name *, tree *, bool *); extern tree grokparm(const struct c_parm *, tree *); extern tree implicitly_declare(location_t, tree); extern void keep_next_level(void); extern void pending_xref_error(void); extern void c_push_function_context(void); extern void c_pop_function_context(void); extern void push_parm_decl(const struct c_parm *, tree *); extern struct c_declarator * set_array_declarator_inner(struct c_declarator *, struct c_declarator *); extern tree c_builtin_function(tree); extern tree c_builtin_function_ext_scope(tree); extern void shadow_tag(const struct c_declspecs *); extern void shadow_tag_warned(const struct c_declspecs *, int); extern tree start_enum(location_t, struct c_enum_contents *, tree); extern bool start_function(struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl(struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct(location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls(void); extern void store_parm_decls_from(struct c_arg_info *); extern void temp_store_parm_decls(tree, tree); extern void temp_pop_parm_decls(void); extern tree xref_tag(enum tree_code, tree); extern struct c_typespec parser_xref_tag(location_t, enum tree_code, tree); extern struct c_parm * build_c_parm(struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator * build_attrs_declarator(tree, struct c_declarator *); extern struct c_declarator * build_function_declarator(struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator(tree); extern struct c_declarator * make_pointer_declarator(struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs(void); extern struct c_declspecs * declspecs_add_qual(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_type(location_t, struct c_declspecs *, struct c_typespec); 
extern struct c_declspecs * declspecs_add_scspec(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_attrs(location_t, struct c_declspecs *, tree); extern struct c_declspecs * declspecs_add_addrspace(location_t, struct c_declspecs *, addr_space_t); extern struct c_declspecs * declspecs_add_alignas(location_t, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs(struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init(void); extern bool c_missing_noreturn_ok_p(tree); extern bool c_warn_unused_global_decl(const_tree); extern void c_initialize_diagnostics(diagnostic_context *); extern bool c_vla_unspec_p(tree x, tree fn); extern alias_set_type c_get_alias_set(tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion(location_t, tree); extern tree require_complete_type(location_t, tree); extern bool same_translation_unit_p(const_tree, const_tree); extern int comptypes(tree, tree); extern int comptypes_check_different_types(tree, tree, bool *); extern bool c_vla_type_p(const_tree); extern bool c_mark_addressable(tree, bool = false); extern void c_incomplete_type_error(location_t, const_tree, const_tree); extern tree c_type_promotes_to(tree); extern struct c_expr default_function_array_conversion(location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion(location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue(location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1(tree, bool); extern void mark_exp_read(tree); extern tree composite_type(tree, tree); extern tree build_component_ref(location_t, tree, tree, location_t); extern tree build_array_ref(location_t, tree, tree); extern tree build_external_ref(location_t, tree, bool, tree *); extern void 
pop_maybe_used(bool); extern struct c_expr c_expr_sizeof_expr(location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type(location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op(location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op(location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr(location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr(location_t, tree, tree); extern tree c_cast_expr(location_t, struct c_type_name *, tree); extern tree build_c_cast(location_t, tree, tree); extern void store_init_value(location_t, tree, tree, tree); extern void maybe_warn_string_init(location_t, tree, struct c_expr); extern void start_init(tree, tree, int, rich_location *); extern void finish_init(void); extern void really_start_incremental_init(tree); extern void finish_implicit_inits(location_t, struct obstack *); extern void push_init_level(location_t, int, struct obstack *); extern struct c_expr pop_init_level(location_t, int, struct obstack *, location_t); extern void set_init_index(location_t, tree, tree, struct obstack *); extern void set_init_label(location_t, tree, location_t, struct obstack *); extern void process_init_element(location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal(location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type(location_t, struct c_type_name *); extern tree c_start_case(location_t, location_t, tree, bool); extern void c_finish_case(tree, tree); extern tree build_asm_expr(location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt(bool, tree); extern int c_types_compatible_p(tree, tree); extern tree c_begin_compound_stmt(bool); extern tree c_end_compound_stmt(location_t, tree, bool); extern void c_finish_if_stmt(location_t, tree, tree, tree); extern void c_finish_loop(location_t, location_t, tree, location_t, 
tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr(void); extern tree c_finish_stmt_expr(location_t, tree); extern tree c_process_expr_stmt(location_t, tree); extern tree c_finish_expr_stmt(location_t, tree); extern tree c_finish_return(location_t, tree, tree); extern tree c_finish_bc_stmt(location_t, tree *, bool); extern tree c_finish_goto_label(location_t, tree); extern tree c_finish_goto_ptr(location_t, tree); extern tree c_expr_to_decl(tree, bool *, bool *); extern tree c_finish_omp_construct(location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data(location_t, tree, tree); extern tree c_finish_oacc_host_data(location_t, tree, tree); extern tree c_begin_omp_parallel(void); extern tree c_finish_omp_parallel(location_t, tree, tree); extern tree c_begin_omp_task(void); extern tree c_finish_omp_task(location_t, tree, tree); extern void c_finish_omp_cancel(location_t, tree); extern void c_finish_omp_cancellation_point(location_t, tree); extern tree c_finish_omp_clauses(tree, enum c_omp_region_type); extern tree c_build_va_arg(location_t, tree, location_t, tree); extern tree c_finish_transaction(location_t, tree, int); extern bool c_tree_equal(tree, tree); extern tree c_build_function_call_vec(location_t, vec < location_t >, tree, vec < tree, va_gc > *, vec < tree, va_gc > *); extern tree c_omp_clause_copy_ctor(tree, tree, tree); /* * Set to 0 at beginning of a function definition, set to 1 if a return * statement that specifies a return value is seen. */ extern int current_function_returns_value; /* * Set to 0 at beginning of a function definition, set to 1 if a return * statement with no argument is seen. */ extern int current_function_returns_null; /* * Set to 0 at beginning of a function definition, set to 1 if a call to a * noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. 
*/ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* * If this is non-NULL, then it is a "binding oracle" which can lazily create * bindings when needed by the C compiler. The oracle is told the name and * type of the binding to create. It can call pushdecl or the like to ensure * the binding is visible; or do nothing, leaving the binding untouched. * c-decl.c takes note of when the oracle has been called and will not call * it again if it fails to create a given binding. */ typedef void c_binding_oracle_function(enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl(tree); extern tree c_omp_reduction_id(enum tree_code, tree); extern tree c_omp_reduction_decl(tree); extern tree c_omp_reduction_lookup(tree, tree); extern tree c_check_omp_declare_reduction_r(tree *, int *, void *); extern void c_pushtag(location_t, tree, tree); extern void c_bind(location_t, tree, bool); extern bool tag_exists_p(enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c99(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c11(location_t, int opt, const char *,...) ATTRIBUTE_GCC_DIAG(3, 4); extern void set_c_expr_source_range(c_expr * expr, location_t start, location_t finish); extern void set_c_expr_source_range(c_expr * expr, source_range src_range); /* In c-fold.c */ extern vec < tree > incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests(void); } //namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
GB_unop__floor_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__floor_fc64_fc64)
// op(A') function:  GB (_unop_tran__floor_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cfloor (aij)

// A (input) entry type
#define GB_ATYPE \
    GxB_FC64_t

// C (output) entry type
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: GB_cfloor is declared in GB.h; presumably the
// componentwise floor of a double-complex value — confirm in GB.h
#define GB_OP(z, x) \
    z = GB_cfloor (x) ;

// casting (no typecast needed here: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cfloor (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies GB_cfloor to each of the anz entries of Ax, writing into Cx.
// If Ab is non-NULL, A is bitmap and entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when the operator is disabled via GB_DISABLE.

GrB_Info GB (_unop_apply__floor_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads                // # of OpenMP threads for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is textually
// included here and instantiated via the GB_* macros defined above.

GrB_Info GB (_unop_tran__floor_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__floor_fc64_fc64)
// op(A') function:  GB (_unop_tran__floor_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cfloor (aij)

// A (input) entry type
#define GB_ATYPE \
    GxB_FC64_t

// C (output) entry type
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: GB_cfloor is declared in GB.h; presumably the
// componentwise floor of a double-complex value — confirm in GB.h
#define GB_OP(z, x) \
    z = GB_cfloor (x) ;

// casting (no typecast needed here: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cfloor (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies GB_cfloor to each of the anz entries of Ax, writing into Cx.
// If Ab is non-NULL, A is bitmap and entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when the operator is disabled via GB_DISABLE.
//
// NOTE(review): this is the sequential variant of the generated kernel —
// the loops below carry no OpenMP pragmas, so the nthreads argument only
// affects the GB_memcpy path.  Intentional for this variant; confirm
// against the parallel twin of this file.

GrB_Info GB (_unop_apply__floor_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is textually
// included here and instantiated via the GB_* macros defined above.

GrB_Info GB (_unop_tran__floor_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__floor_fc64_fc64)
// op(A') function:  GB (_unop_tran__floor_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cfloor (aij)

// A (input) entry type
#define GB_ATYPE \
    GxB_FC64_t

// C (output) entry type
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: GB_cfloor is declared in GB.h; presumably the
// componentwise floor of a double-complex value — confirm in GB.h
#define GB_OP(z, x) \
    z = GB_cfloor (x) ;

// casting (no typecast needed here: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cfloor (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies GB_cfloor to each of the anz entries of Ax, writing into Cx.
// If Ab is non-NULL, A is bitmap and entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when the operator is disabled via GB_DISABLE.

GrB_Info GB (_unop_apply__floor_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads                // # of OpenMP threads for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cfloor (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is textually
// included here and instantiated via the GB_* macros defined above.

GrB_Info GB (_unop_tran__floor_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dataset.h
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>

#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;

/*!
* \brief This class is used to store some meta (non-feature) data for training data,
*        e.g. labels, weights, initial scores, query level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [query_boundaries[i], query_boundaries[i+1]).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both are present);
*    the weight for the i-th query is
*    sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If present, the model will boost from this score, otherwise it starts from 0.
*/
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief Init as subset of another Metadata object
  * \param metadata Source metadata to subset
  * \param used_indices Indices of used records
  * \param num_used_indices Number of used records
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exist
  * \param query_idx Index of query id column, < 0 means doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of locally used records
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices);

  void SetLabel(const label_t* label, data_size_t len);

  void SetWeights(const label_t* weights, data_size_t len);

  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores; this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);

  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }

  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }

  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }

  /*!
  * \brief Get weights, if they don't exist, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries, if they don't exist, will return nullptr.
  *        We assume data are ordered by query; the interval
  *        [query_boundaries[i], query_boundaries[i+1]) holds the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries, if they don't exist, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores, if they don't exist, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool query_load_from_file_;
  bool init_score_load_from_file_;
};

/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}

  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, stored as (column_idx, value) pairs
  * \param out_label Label will be stored here if it exists
  */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;

  virtual int TotalColumns() const = 0;

  /*!
  * \brief Create an object of parser, will auto-choose the format depending on the file
  * \param filename One filename of data
  * \param has_header True if the file has a header line
  * \param num_features Pass num_features of this data file if you know it, <= 0 means unknown
  * \param label_idx Index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx);
};

/*!
* \brief The main class of data set,
*        which is used for training or validation
*/
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const IOConfig& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*! \brief True when the other dataset has the same features, label index, and bin mappers */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*! \brief Push one dense row of raw feature values; no-op once loading has finished */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*! \brief Push one sparse row of (column_idx, value) pairs; no-op once loading has finished */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  /*! \brief Push one value directly into a (group, sub_feature) slot */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map an inner (used) feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map an original column index to the inner feature index (-1 if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }

  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }

  /* NOTE(review): "Feture" is a typo kept for API compatibility */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }

  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }

  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Original column indices of all used features */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
    const data_size_t* data_indices, data_size_t num_data,
    int leaf_idx,
    std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
    const score_t* gradients, const score_t* hessians,
    score_t* ordered_gradients, score_t* ordered_hessians,
    bool is_constant_hessian,
    HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;

  /*! \brief Split data by a feature threshold, delegating to the feature's group */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold,
                           int num_threshold,
                           bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left,
                                         data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief Bin offset of a sub-feature inside its group (1 for the first sub-feature) */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of feature i (0 when no constraints are set) */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  /*! \brief True when any feature carries a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin number of feature i back to a real-valued threshold */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*! \brief Create one OrderedBin per feature group (parallel over groups) */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*! \brief Set feature names; whitespace in names is replaced by underscores */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name: feature_names_){
      if (feature_name.find(' ') != std::string::npos){
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName){
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-column bin info strings ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  /*! \brief Get number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index */
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features */
  int num_features_;
  /*! \brief Number of total features */
  int num_total_features_;
  /*! \brief Number of total data */
  data_size_t num_data_;
  /*! \brief Store some label-level data */
  Metadata metadata_;
  /*! \brief Index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief Store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token identifying the binary file format */
  static const char* binary_file_token;
  int num_groups_;
  std::vector<int> real_feature_idx_;
  std::vector<int> feature2group_;
  std::vector<int> feature2subfeature_;
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  std::vector<int8_t> monotone_types_;
  bool is_finish_load_;
};

}  // namespace LightGBM

#endif   // LightGBM_DATA_H_
#ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/meta.h> #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <vector> #include <utility> #include <functional> #include <string> #include <unordered_set> #include <mutex> namespace LightGBM { /* ! \brief forward declaration */ class DatasetLoader; /* * ! \brief This class is used to store some meta(non-feature) data for * training data, e.g. labels, weights, initial scores, qurey level * informations. * * Some details: 1. Label, used for traning. 2. Weights, weighs of records, * optional 3. Query Boundaries, necessary for lambdarank. The documents * of i-th query is in [ query_boundarise[i], query_boundarise[i+1] ) 4. * Query Weights, auto calculate by weights and query_boundarise(if both * of them are existed) the weight for i-th query is * sum(query_boundarise[i] , .., query_boundarise[i+1]) / * (query_boundarise[i + 1] - query_boundarise[i+1]) 5. Initial score. * optional. if exsitng, the model will boost from this score, otherwise * will start from 0. */ class Metadata { public: /* * ! \brief Null costructor */ Metadata(); /* * ! \brief Initialization will load qurey level informations, since * it is need for sampling data \param data_filename Filename of data * \param init_score_filename Filename of initial score */ void Init(const char *data_filename, const char *initscore_file); /* * ! \brief init as subset \param metadata Filename of data \param * used_indices \param num_used_indices */ void Init(const Metadata & metadata, const data_size_t * used_indices, data_size_t num_used_indices); /* * ! \brief Initial with binary memory \param memory Pointer to * memory */ void LoadFromMemory(const void *memory); /* ! \brief Destructor */ ~Metadata(); /* * ! 
\brief Initial work, will allocate space for label, weight(if * exists) and query(if exists) \param num_data Number of training * data \param weight_idx Index of weight column, < 0 means doesn't * exists \param query_idx Index of query id column, < 0 means * doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /* * ! \brief Partition label by used indices \param used_indices * Indice of local used */ void PartitionLabel(const std::vector < data_size_t > &used_indices); /* * ! \brief Partition meta data according to local used indices if * need \param num_all_data Number of total training data, including * other machines' data on parallel learning \param used_data_indices * Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector < data_size_t > &used_data_indices); void SetLabel(const label_t * label, data_size_t len); void SetWeights(const label_t * weights, data_size_t len); void SetQuery(const data_size_t * query, data_size_t len); /* * ! \brief Set initial scores \param init_score Initial scores, this * class will manage memory for init_score. */ void SetInitScore(const double *init_score, data_size_t len); /* * ! \brief Save binary data to file \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter * writer)const; /* * ! \brief Get sizes in byte of this object */ size_t SizesInByte() const; /* * ! \brief Get pointer of label \return Pointer of label */ inline const label_t *label() const { return label_.data(); } /* * ! \brief Set label for one record \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, label_t value) { label_[idx] = value; } /* * ! \brief Set Weight for one record \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, label_t value) { weights_[idx] = value; } /* * ! 
\brief Set Query Id for one record \param idx Index of this * record \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast < data_size_t > (value); } /* * ! \brief Get weights, if not exists, will return nullptr \return * Pointer of weights */ inline const label_t *weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /* * ! \brief Get data boundaries on queries, if not exists, will * return nullptr we assume data will order by query, the interval of * [query_boundaris[i], query_boundaris[i+1]) is the data indices for * query i. \return Pointer of data boundaries on queries */ inline const data_size_t *query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /* * ! \brief Get Number of queries \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /* * ! \brief Get weights for queries, if not exists, will return * nullptr \return Pointer of weights for queries */ inline const label_t *query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /* * ! \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double *init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /* * ! \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /* ! \brief Disable copy */ Metadata & operator = (const Metadata &)= delete; /* ! \brief Disable copy */ Metadata(const Metadata &)= delete; private: /* ! \brief Load initial scores from file */ void LoadInitialScore(const char *initscore_file); /* ! \brief Load wights from file */ void LoadWeights(); /* ! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /* ! 
\brief Load query wights */ void LoadQueryWeights(); /* ! \brief Filename of current data */ std: : string data_filename_; /* ! \brief Number of data */ data_size_t num_data_; /* ! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /* ! \brief Label data */ std: : vector < label_t > label_; /* ! \brief Weights data */ std: : vector < label_t > weights_; /* ! \brief Query boundaries */ std: : vector < data_size_t > query_boundaries_; /* ! \brief Query weights */ std: : vector < label_t > query_weights_; /* ! \brief Number of querys */ data_size_t num_queries_; /* * ! \brief Number of Initial score, used to check correct weight * file */ int64_t num_init_score_; /* ! \brief Initial score */ std: : vector < double >init_score_; /* ! \brief Queries data */ std: : vector < data_size_t > queries_; /* ! \brief mutex for threading safe call */ std: : mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /* ! \brief Interface for Parser */ class Parser { public: /* ! \brief virtual destructor */ virtual ~ Parser() { } /* * ! \brief Parse one line with label \param str One line record, * string format, should end with '\0' \param out_features Output * columns, store in (column_idx, values) \param out_label Label will * store to this if exists */ virtual void ParseOneLine(const char *str, std::vector < std::pair < int, double >>*out_features, double *out_label)const = 0; virtual int TotalColumns() const = 0; /* * ! \brief Create a object of parser, will auto choose the format * depend on file \param filename One Filename of data \param * num_features Pass num_features of this data file if you know, <=0 * means don't know \param label_idx index of label column \return * Object of parser */ static Parser *CreateParser(const char *filename, bool has_header, int num_features, int label_idx); }; /* * ! 
\brief The main class of data set, which are used to traning or * validation */ class Dataset { public: friend DatasetLoader; LIGHTGBM_EXPORT Dataset(); LIGHTGBM_EXPORT Dataset(data_size_t num_data); void Construct( std::vector < std::unique_ptr < BinMapper >> &bin_mappers, int **sample_non_zero_indices, const int *num_per_col, size_t total_sample_cnt, const IOConfig & io_config); /* ! \brief Destructor */ LIGHTGBM_EXPORT ~ Dataset(); LIGHTGBM_EXPORT bool CheckAlign(const Dataset & other)const { if (num_features_ != other.num_features_) { return false; } if (num_total_features_ != other.num_total_features_) { return false; } if (label_idx_ != other.label_idx_) { return false; } for (int i = 0; i < num_features_; ++i) { if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) { return false; } } return true; } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector < double >&feature_values) { if (is_finish_load_) { return; } for (size_t i = 0; i < feature_values.size() && i < static_cast < size_t > (num_total_features_); ++i) { int feature_idx = used_feature_map_[i]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]); } } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector < std::pair < int, double >>&feature_values) { if (is_finish_load_) { return; } for (auto & inner_data:feature_values) { if (inner_data.first >= num_total_features_) { continue; } int feature_idx = used_feature_map_[inner_data.first]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second); } } } inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) { feature_groups_[group]->PushData(tid, sub_feature, 
row_idx, value); } inline int RealFeatureIndex(int fidx)const { return real_feature_idx_[fidx]; } inline int InnerFeatureIndex(int col_idx)const { return used_feature_map_[col_idx]; } inline int Feature2Group(int feature_idx)const { return feature2group_[feature_idx]; } inline int Feture2SubFeature(int feature_idx)const { return feature2subfeature_[feature_idx]; } inline uint64_t GroupBinBoundary(int group_idx)const { return group_bin_boundaries_[group_idx]; } inline uint64_t NumTotalBin() const { return group_bin_boundaries_.back(); } inline std::vector < int >ValidFeatureIndices() const { std::vector < int >ret; for (int i = 0; i < num_total_features_; ++i) { if (used_feature_map_[i] >= 0) { ret.push_back(i); } } return ret; } void ReSize(data_size_t num_data); void CopySubset(const Dataset * fullset, const data_size_t * used_indices, data_size_t num_used_indices, bool need_meta_data); LIGHTGBM_EXPORT void FinishLoad(); LIGHTGBM_EXPORT bool SetFloatField(const char *field_name, const float *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetDoubleField(const char *field_name, const double *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetIntField(const char *field_name, const int *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool GetFloatField(const char *field_name, data_size_t * out_len, const float **out_ptr); LIGHTGBM_EXPORT bool GetDoubleField(const char *field_name, data_size_t * out_len, const double **out_ptr); LIGHTGBM_EXPORT bool GetIntField(const char *field_name, data_size_t * out_len, const int **out_ptr); /* * ! 
\brief Save current dataset into binary file, will save to * "filename.bin" */ LIGHTGBM_EXPORT void SaveBinaryFile(const char *bin_filename); LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset * dataset); LIGHTGBM_EXPORT void CreateValid(const Dataset * dataset); void ConstructHistograms(const std::vector < int8_t > &is_feature_used, const data_size_t * data_indices, data_size_t num_data, int leaf_idx, std::vector < std::unique_ptr < OrderedBin >> &ordered_bins, const score_t * gradients, const score_t * hessians, score_t * ordered_gradients, score_t * ordered_hessians, bool is_constant_hessian, HistogramBinEntry * histogram_data)const; void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data, HistogramBinEntry * data)const; inline data_size_t Split(int feature, const uint32_t * threshold, int num_threshold, bool default_left, data_size_t * data_indices, data_size_t num_data, data_size_t * lte_indices, data_size_t * gt_indices)const { const int group = feature2group_[feature]; const int sub_feature = feature2subfeature_[feature]; return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices); } inline int SubFeatureBinOffset(int i)const { const int sub_feature = feature2subfeature_[i]; if (sub_feature == 0) { return 1; } else { return 0; } } inline int FeatureNumBin(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin(); } inline int8_t FeatureMonotone(int i)const { if (monotone_types_.empty()) { return 0; } else { return monotone_types_[i]; } } bool HasMonotone() const { if (monotone_types_.empty()) { return false; } else { for (size_t i = 0; i < monotone_types_.size(); ++i) { if (monotone_types_[i] != 0) { return true; } } return false; } } inline int FeatureGroupNumBin(int group)const { return feature_groups_[group]->num_total_bin_; 
} inline const BinMapper *FeatureBinMapper(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature].get(); } inline const Bin *FeatureBin(int i)const { const int group = feature2group_[i]; return feature_groups_[group]->bin_data_.get(); } inline const Bin *FeatureGroupBin(int group)const { return feature_groups_[group]->bin_data_.get(); } inline bool FeatureGroupIsSparse(int group)const { return feature_groups_[group]->is_sparse_; } inline BinIterator *FeatureIterator(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->SubFeatureIterator(sub_feature); } inline BinIterator *FeatureGroupIterator(int group)const { return feature_groups_[group]->FeatureGroupIterator(); } inline double RealThreshold(int i, uint32_t threshold)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold); } //given a real threshold, find the closest threshold bin inline uint32_t BinThreshold(int i, double threshold_double)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double); } inline void CreateOrderedBins(std::vector < std::unique_ptr < OrderedBin >> *ordered_bins) const { ordered_bins->resize(num_groups_); OMP_INIT_EX(); for (int i = 0; i < num_groups_; ++i) { OMP_LOOP_EX_BEGIN(); ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin()); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } /* * ! \brief Get meta data pointer \return Pointer of meta data */ inline const Metadata & metadata() const { return metadata_; } /* ! \brief Get Number of used features */ inline int num_features() const { return num_features_; } /* ! 
\brief Get Number of feature groups */ inline int num_feature_groups() const { return num_groups_; } /* ! \brief Get Number of total features */ inline int num_total_features() const { return num_total_features_; } /* ! \brief Get the index of label column */ inline int label_idx() const { return label_idx_; } /* ! \brief Get names of current data set */ inline const std::vector < std::string > &feature_names() const { return feature_names_; } inline void set_feature_names(const std::vector < std::string > &feature_names) { if (feature_names.size() != static_cast < size_t > (num_total_features_)) { Log::Fatal("Size of feature_names error, should equal with total number of features"); } feature_names_ = std: : vector < std: :string > (feature_names); //replace ' ' in feature_names with '_' bool spaceInFeatureName = false; for (auto & feature_name:feature_names_) { if (feature_name.find(' ') != std: : string: :npos) { spaceInFeatureName = true; std: : replace(feature_name.begin(), feature_name.end(), ' ', '_'); } } if (spaceInFeatureName) { Log: : Warning("Find whitespaces in feature_names, replace with underlines"); } } inline std::vector < std::string > feature_infos() const { std::vector < std::string > bufs; for (int i = 0; i < num_total_features_; i++) { int fidx = used_feature_map_[i]; if (fidx == -1) { bufs.push_back("none"); } else { const auto bin_mapper = FeatureBinMapper(fidx); bufs.push_back(bin_mapper->bin_info()); } } return bufs; } /* ! \brief Get Number of data */ inline data_size_t num_data() const { return num_data_; } /* ! \brief Disable copy */ Dataset & operator = (const Dataset &)= delete; /* ! \brief Disable copy */ Dataset(const Dataset &)= delete; private: std: : string data_filename_; /* ! \brief Store used features */ std: : vector < std: :unique_ptr < FeatureGroup >> feature_groups_; /* ! \brief Mapper from real feature index to used index */ std: : vector < int >used_feature_map_; /* ! 
\brief Number of used features */ int num_features_; /* ! \brief Number of total features */ int num_total_features_; /* ! \brief Number of total data */ data_size_t num_data_; /* ! \brief Store some label level data */ Metadata metadata_; /* ! \brief index of label column */ int label_idx_ = 0; /* ! \brief Threshold for treating a feature as a sparse feature */ double sparse_threshold_; /* ! \brief store feature names */ std: : vector < std: :string > feature_names_; /* ! \brief store feature names */ static const char *binary_file_token; int num_groups_; std: : vector < int >real_feature_idx_; std: : vector < int >feature2group_; std: : vector < int >feature2subfeature_; std: : vector < uint64_t > group_bin_boundaries_; std: : vector < int >group_feature_start_; std: : vector < int >group_feature_cnt_; std: : vector < int8_t > monotone_types_; bool is_finish_load_; }; } //namespace LightGBM #endif /* // LightGBM_DATA_H_ */
#ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/meta.h> #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <vector> #include <utility> #include <functional> #include <string> #include <unordered_set> #include <mutex> namespace LightGBM { /* ! \brief forward declaration */ class DatasetLoader; /* * ! \brief This class is used to store some meta(non-feature) data for * training data, e.g. labels, weights, initial scores, qurey level * informations. * * Some details: 1. Label, used for traning. 2. Weights, weighs of records, * optional 3. Query Boundaries, necessary for lambdarank. The documents * of i-th query is in [ query_boundarise[i], query_boundarise[i+1] ) 4. * Query Weights, auto calculate by weights and query_boundarise(if both * of them are existed) the weight for i-th query is * sum(query_boundarise[i] , .., query_boundarise[i+1]) / * (query_boundarise[i + 1] - query_boundarise[i+1]) 5. Initial score. * optional. if exsitng, the model will boost from this score, otherwise * will start from 0. */ class Metadata { public: /* * ! \brief Null costructor */ Metadata(); /* * ! \brief Initialization will load qurey level informations, since * it is need for sampling data \param data_filename Filename of data * \param init_score_filename Filename of initial score */ void Init(const char *data_filename, const char *initscore_file); /* * ! \brief init as subset \param metadata Filename of data \param * used_indices \param num_used_indices */ void Init(const Metadata & metadata, const data_size_t * used_indices, data_size_t num_used_indices); /* * ! \brief Initial with binary memory \param memory Pointer to * memory */ void LoadFromMemory(const void *memory); /* ! \brief Destructor */ ~Metadata(); /* * ! 
\brief Initial work, will allocate space for label, weight(if * exists) and query(if exists) \param num_data Number of training * data \param weight_idx Index of weight column, < 0 means doesn't * exists \param query_idx Index of query id column, < 0 means * doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /* * ! \brief Partition label by used indices \param used_indices * Indice of local used */ void PartitionLabel(const std::vector < data_size_t > &used_indices); /* * ! \brief Partition meta data according to local used indices if * need \param num_all_data Number of total training data, including * other machines' data on parallel learning \param used_data_indices * Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector < data_size_t > &used_data_indices); void SetLabel(const label_t * label, data_size_t len); void SetWeights(const label_t * weights, data_size_t len); void SetQuery(const data_size_t * query, data_size_t len); /* * ! \brief Set initial scores \param init_score Initial scores, this * class will manage memory for init_score. */ void SetInitScore(const double *init_score, data_size_t len); /* * ! \brief Save binary data to file \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter * writer)const; /* * ! \brief Get sizes in byte of this object */ size_t SizesInByte() const; /* * ! \brief Get pointer of label \return Pointer of label */ inline const label_t *label() const { return label_.data(); } /* * ! \brief Set label for one record \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, label_t value) { label_[idx] = value; } /* * ! \brief Set Weight for one record \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, label_t value) { weights_[idx] = value; } /* * ! 
\brief Set Query Id for one record \param idx Index of this * record \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast < data_size_t > (value); } /* * ! \brief Get weights, if not exists, will return nullptr \return * Pointer of weights */ inline const label_t *weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /* * ! \brief Get data boundaries on queries, if not exists, will * return nullptr we assume data will order by query, the interval of * [query_boundaris[i], query_boundaris[i+1]) is the data indices for * query i. \return Pointer of data boundaries on queries */ inline const data_size_t *query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /* * ! \brief Get Number of queries \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /* * ! \brief Get weights for queries, if not exists, will return * nullptr \return Pointer of weights for queries */ inline const label_t *query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /* * ! \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double *init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /* * ! \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /* ! \brief Disable copy */ Metadata & operator = (const Metadata &)= delete; /* ! \brief Disable copy */ Metadata(const Metadata &)= delete; private: /* ! \brief Load initial scores from file */ void LoadInitialScore(const char *initscore_file); /* ! \brief Load wights from file */ void LoadWeights(); /* ! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /* ! 
\brief Load query wights */ void LoadQueryWeights(); /* ! \brief Filename of current data */ std: : string data_filename_; /* ! \brief Number of data */ data_size_t num_data_; /* ! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /* ! \brief Label data */ std: : vector < label_t > label_; /* ! \brief Weights data */ std: : vector < label_t > weights_; /* ! \brief Query boundaries */ std: : vector < data_size_t > query_boundaries_; /* ! \brief Query weights */ std: : vector < label_t > query_weights_; /* ! \brief Number of querys */ data_size_t num_queries_; /* * ! \brief Number of Initial score, used to check correct weight * file */ int64_t num_init_score_; /* ! \brief Initial score */ std: : vector < double >init_score_; /* ! \brief Queries data */ std: : vector < data_size_t > queries_; /* ! \brief mutex for threading safe call */ std: : mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /* ! \brief Interface for Parser */ class Parser { public: /* ! \brief virtual destructor */ virtual ~ Parser() { } /* * ! \brief Parse one line with label \param str One line record, * string format, should end with '\0' \param out_features Output * columns, store in (column_idx, values) \param out_label Label will * store to this if exists */ virtual void ParseOneLine(const char *str, std::vector < std::pair < int, double >>*out_features, double *out_label)const = 0; virtual int TotalColumns() const = 0; /* * ! \brief Create a object of parser, will auto choose the format * depend on file \param filename One Filename of data \param * num_features Pass num_features of this data file if you know, <=0 * means don't know \param label_idx index of label column \return * Object of parser */ static Parser *CreateParser(const char *filename, bool has_header, int num_features, int label_idx); }; /* * ! 
\brief The main class of data set, which are used to traning or * validation */ class Dataset { public: friend DatasetLoader; LIGHTGBM_EXPORT Dataset(); LIGHTGBM_EXPORT Dataset(data_size_t num_data); void Construct( std::vector < std::unique_ptr < BinMapper >> &bin_mappers, int **sample_non_zero_indices, const int *num_per_col, size_t total_sample_cnt, const IOConfig & io_config); /* ! \brief Destructor */ LIGHTGBM_EXPORT ~ Dataset(); LIGHTGBM_EXPORT bool CheckAlign(const Dataset & other)const { if (num_features_ != other.num_features_) { return false; } if (num_total_features_ != other.num_total_features_) { return false; } if (label_idx_ != other.label_idx_) { return false; } for (int i = 0; i < num_features_; ++i) { if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) { return false; } } return true; } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector < double >&feature_values) { if (is_finish_load_) { return; } for (size_t i = 0; i < feature_values.size() && i < static_cast < size_t > (num_total_features_); ++i) { int feature_idx = used_feature_map_[i]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]); } } } inline void PushOneRow(int tid, data_size_t row_idx, const std::vector < std::pair < int, double >>&feature_values) { if (is_finish_load_) { return; } for (auto & inner_data:feature_values) { if (inner_data.first >= num_total_features_) { continue; } int feature_idx = used_feature_map_[inner_data.first]; if (feature_idx >= 0) { const int group = feature2group_[feature_idx]; const int sub_feature = feature2subfeature_[feature_idx]; feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second); } } } inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) { feature_groups_[group]->PushData(tid, sub_feature, 
row_idx, value); } inline int RealFeatureIndex(int fidx)const { return real_feature_idx_[fidx]; } inline int InnerFeatureIndex(int col_idx)const { return used_feature_map_[col_idx]; } inline int Feature2Group(int feature_idx)const { return feature2group_[feature_idx]; } inline int Feture2SubFeature(int feature_idx)const { return feature2subfeature_[feature_idx]; } inline uint64_t GroupBinBoundary(int group_idx)const { return group_bin_boundaries_[group_idx]; } inline uint64_t NumTotalBin() const { return group_bin_boundaries_.back(); } inline std::vector < int >ValidFeatureIndices() const { std::vector < int >ret; for (int i = 0; i < num_total_features_; ++i) { if (used_feature_map_[i] >= 0) { ret.push_back(i); } } return ret; } void ReSize(data_size_t num_data); void CopySubset(const Dataset * fullset, const data_size_t * used_indices, data_size_t num_used_indices, bool need_meta_data); LIGHTGBM_EXPORT void FinishLoad(); LIGHTGBM_EXPORT bool SetFloatField(const char *field_name, const float *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetDoubleField(const char *field_name, const double *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool SetIntField(const char *field_name, const int *field_data, data_size_t num_element); LIGHTGBM_EXPORT bool GetFloatField(const char *field_name, data_size_t * out_len, const float **out_ptr); LIGHTGBM_EXPORT bool GetDoubleField(const char *field_name, data_size_t * out_len, const double **out_ptr); LIGHTGBM_EXPORT bool GetIntField(const char *field_name, data_size_t * out_len, const int **out_ptr); /* * ! 
\brief Save current dataset into binary file, will save to * "filename.bin" */ LIGHTGBM_EXPORT void SaveBinaryFile(const char *bin_filename); LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset * dataset); LIGHTGBM_EXPORT void CreateValid(const Dataset * dataset); void ConstructHistograms(const std::vector < int8_t > &is_feature_used, const data_size_t * data_indices, data_size_t num_data, int leaf_idx, std::vector < std::unique_ptr < OrderedBin >> &ordered_bins, const score_t * gradients, const score_t * hessians, score_t * ordered_gradients, score_t * ordered_hessians, bool is_constant_hessian, HistogramBinEntry * histogram_data)const; void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data, HistogramBinEntry * data)const; inline data_size_t Split(int feature, const uint32_t * threshold, int num_threshold, bool default_left, data_size_t * data_indices, data_size_t num_data, data_size_t * lte_indices, data_size_t * gt_indices)const { const int group = feature2group_[feature]; const int sub_feature = feature2subfeature_[feature]; return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices); } inline int SubFeatureBinOffset(int i)const { const int sub_feature = feature2subfeature_[i]; if (sub_feature == 0) { return 1; } else { return 0; } } inline int FeatureNumBin(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin(); } inline int8_t FeatureMonotone(int i)const { if (monotone_types_.empty()) { return 0; } else { return monotone_types_[i]; } } bool HasMonotone() const { if (monotone_types_.empty()) { return false; } else { for (size_t i = 0; i < monotone_types_.size(); ++i) { if (monotone_types_[i] != 0) { return true; } } return false; } } inline int FeatureGroupNumBin(int group)const { return feature_groups_[group]->num_total_bin_; 
} inline const BinMapper *FeatureBinMapper(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature].get(); } inline const Bin *FeatureBin(int i)const { const int group = feature2group_[i]; return feature_groups_[group]->bin_data_.get(); } inline const Bin *FeatureGroupBin(int group)const { return feature_groups_[group]->bin_data_.get(); } inline bool FeatureGroupIsSparse(int group)const { return feature_groups_[group]->is_sparse_; } inline BinIterator *FeatureIterator(int i)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->SubFeatureIterator(sub_feature); } inline BinIterator *FeatureGroupIterator(int group)const { return feature_groups_[group]->FeatureGroupIterator(); } inline double RealThreshold(int i, uint32_t threshold)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold); } //given a real threshold, find the closest threshold bin inline uint32_t BinThreshold(int i, double threshold_double)const { const int group = feature2group_[i]; const int sub_feature = feature2subfeature_[i]; return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double); } inline void CreateOrderedBins(std::vector < std::unique_ptr < OrderedBin >> *ordered_bins) const { ordered_bins->resize(num_groups_); OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_groups_; ++i) { OMP_LOOP_EX_BEGIN(); ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin()); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } /* * ! \brief Get meta data pointer \return Pointer of meta data */ inline const Metadata & metadata() const { return metadata_; } /* ! \brief Get Number of used features */ inline int num_features() const { return num_features_; } /* ! 
\brief Get Number of feature groups */ inline int num_feature_groups() const { return num_groups_; } /* ! \brief Get Number of total features */ inline int num_total_features() const { return num_total_features_; } /* ! \brief Get the index of label column */ inline int label_idx() const { return label_idx_; } /* ! \brief Get names of current data set */ inline const std::vector < std::string > &feature_names() const { return feature_names_; } inline void set_feature_names(const std::vector < std::string > &feature_names) { if (feature_names.size() != static_cast < size_t > (num_total_features_)) { Log::Fatal("Size of feature_names error, should equal with total number of features"); } feature_names_ = std: : vector < std: :string > (feature_names); //replace ' ' in feature_names with '_' bool spaceInFeatureName = false; for (auto & feature_name:feature_names_) { if (feature_name.find(' ') != std: : string: :npos) { spaceInFeatureName = true; std: : replace(feature_name.begin(), feature_name.end(), ' ', '_'); } } if (spaceInFeatureName) { Log: : Warning("Find whitespaces in feature_names, replace with underlines"); } } inline std::vector < std::string > feature_infos() const { std::vector < std::string > bufs; for (int i = 0; i < num_total_features_; i++) { int fidx = used_feature_map_[i]; if (fidx == -1) { bufs.push_back("none"); } else { const auto bin_mapper = FeatureBinMapper(fidx); bufs.push_back(bin_mapper->bin_info()); } } return bufs; } /* ! \brief Get Number of data */ inline data_size_t num_data() const { return num_data_; } /* ! \brief Disable copy */ Dataset & operator = (const Dataset &)= delete; /* ! \brief Disable copy */ Dataset(const Dataset &)= delete; private: std: : string data_filename_; /* ! \brief Store used features */ std: : vector < std: :unique_ptr < FeatureGroup >> feature_groups_; /* ! \brief Mapper from real feature index to used index */ std: : vector < int >used_feature_map_; /* ! 
\brief Number of used features */ int num_features_; /* ! \brief Number of total features */ int num_total_features_; /* ! \brief Number of total data */ data_size_t num_data_; /* ! \brief Store some label level data */ Metadata metadata_; /* ! \brief index of label column */ int label_idx_ = 0; /* ! \brief Threshold for treating a feature as a sparse feature */ double sparse_threshold_; /* ! \brief store feature names */ std: : vector < std: :string > feature_names_; /* ! \brief store feature names */ static const char *binary_file_token; int num_groups_; std: : vector < int >real_feature_idx_; std: : vector < int >feature2group_; std: : vector < int >feature2subfeature_; std: : vector < uint64_t > group_bin_boundaries_; std: : vector < int >group_feature_start_; std: : vector < int >group_feature_cnt_; std: : vector < int8_t > monotone_types_; bool is_finish_load_; }; } //namespace LightGBM #endif /* // LightGBM_DATA_H_ */
rose_heat_serial_OpenMP.c
#include <omp.h> /* This code si contributed by Richard T. Evans at the Texas Advanced computing Center * The University of Texas at Austin * * To compile: icc -o heat heat_serial.c calc_up.c */ #include <stdio.h> #include <sys/time.h> #include "calc_up.h" int main() { int Nx; int Ny; int Nt; int t; int x; int y; Nx = 1000; Ny = 1000; Nt = 1000; double u[Nx][Ny]; double up[Nx][Ny]; struct timeval start; struct timeval end; float delta; // Boundary conditions for (x = 0; x < Nx; x++) for (y = 0; y < Ny; y++) { if (x == 0) u[x][y] = 1.0; else u[x][y] = 0.0; } gettimeofday(&start,0); //////////////////////////////////////////////////////////////////////// // Finite difference algorithm - iterate over time to reach steady state //////////////////////////////////////////////////////////////////////// for (t = 0; t < Nt; t++) { #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) calc_up(x,y,Nx,Ny,u,up); } #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) u[x][y] = up[x][y]; } } gettimeofday(&end,0); delta = (((((end.tv_sec - start.tv_sec) * 1000000u) + end.tv_usec) - start.tv_usec) / 1.e6); double sum = 0; for (y = 0; y < Ny; y++) { for (x = 0; x < Nx; x++) { sum += u[x][y]; } } printf("run time = %fs\n",delta); printf("sum of u = %f\n",sum); return 0; }
#include <omp.h> /* * This code si contributed by Richard T. Evans at the Texas Advanced * computing Center The University of Texas at Austin * * To compile: icc -o heat heat_serial.c calc_up.c */ #include <stdio.h> #include <sys/time.h> #include "calc_up.h" int main() { int Nx; int Ny; int Nt; int t; int x; int y; Nx = 1000; Ny = 1000; Nt = 1000; double u[Nx][Ny]; double up[Nx][Ny]; struct timeval start; struct timeval end; float delta; //Boundary conditions for (x = 0; x < Nx; x++) for (y = 0; y < Ny; y++) { if (x == 0) u[x][y] = 1.0; else u[x][y] = 0.0; } gettimeofday(&start, 0); //////////////////////////////////////////////////////////////////////// //Finite difference algorithm - iterate over time to reach steady state //////////////////////////////////////////////////////////////////////// for (t = 0; t < Nt; t++) { for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) calc_up(x, y, Nx, Ny, u, up); for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) u[x][y] = up[x][y]; } gettimeofday(&end, 0); delta = (((((end.tv_sec - start.tv_sec) * 1000000u) + end.tv_usec) - start.tv_usec) / 1.e6); double sum = 0; for (y = 0; y < Ny; y++) { for (x = 0; x < Nx; x++) { sum += u[x][y]; } } printf("run time = %fs\n", delta); printf("sum of u = %f\n", sum); return 0; }
#include <omp.h> /* * This code si contributed by Richard T. Evans at the Texas Advanced * computing Center The University of Texas at Austin * * To compile: icc -o heat heat_serial.c calc_up.c */ #include <stdio.h> #include <sys/time.h> #include "calc_up.h" int main() { int Nx; int Ny; int Nt; int t; int x; int y; Nx = 1000; Ny = 1000; Nt = 1000; double u[Nx][Ny]; double up[Nx][Ny]; struct timeval start; struct timeval end; float delta; //Boundary conditions for (x = 0; x < Nx; x++) for (y = 0; y < Ny; y++) { if (x == 0) u[x][y] = 1.0; else u[x][y] = 0.0; } gettimeofday(&start, 0); //////////////////////////////////////////////////////////////////////// //Finite difference algorithm - iterate over time to reach steady state //////////////////////////////////////////////////////////////////////// for (t = 0; t < Nt; t++) { #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) calc_up(x, y, Nx, Ny, u, up); } #pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y) { #pragma omp for for (x = 1; x < (Nx - 1); x++) for (y = 1; y < (Ny - 1); y++) u[x][y] = up[x][y]; } } gettimeofday(&end, 0); delta = (((((end.tv_sec - start.tv_sec) * 1000000u) + end.tv_usec) - start.tv_usec) / 1.e6); double sum = 0; for (y = 0; y < Ny; y++) { for (x = 0; x < Nx; x++) { sum += u[x][y]; } } printf("run time = %fs\n", delta); printf("sum of u = %f\n", sum); return 0; }
rk4.c
#include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> #define SIZE 9600 struct timeval startTime; struct timeval finishTime; double timeIntervalLength; __sw_global__ double *yt; __sw_global__ double *k1; __sw_global__ double *k2; __sw_global__ double *k3; __sw_global__ double *k4; __sw_global__ double *yout; __sw_global__ double totalSum; __sw_global__ double* y; __sw_global__ double* power; __sw_global__ double** c; __sw_global__ double h; __sw_global__ double sum; __sw_global__ int i,j; void* myMalloc(int size, int info) { void* t = (void*)malloc(size); if(!t) { printf("\nMemory allocation error [%d]",info); fflush(stdout); exit(0); } return t; } int main(int argc, char* argv[]) { h=0.3154; sum=0; // //MEMORY ALLOCATION // y = (double* )myMalloc(SIZE*sizeof(double) ,1); power = (double* )myMalloc(SIZE*sizeof(double) ,2); c = (double**)myMalloc(SIZE*sizeof(double*),3); for (i=0;i<SIZE;i++) { c[i]=(double*)myMalloc(SIZE*sizeof(double),4); } yt = (double*)myMalloc(SIZE*sizeof(double*),4); k1 = (double*)myMalloc(SIZE*sizeof(double*),5); k2 = (double*)myMalloc(SIZE*sizeof(double*),6); k3 = (double*)myMalloc(SIZE*sizeof(double*),7); k4 = (double*)myMalloc(SIZE*sizeof(double*),8); yout = (double*)myMalloc(SIZE*sizeof(double*),9); // //INITIALIZATION // for (i = 0; i < SIZE; i++) { y[i]=i*i; power[i]=i+i; for (j = 0; j < SIZE; j++) { c[i][j]=i*i+j; } } // Start timers gettimeofday(&startTime, NULL); #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j]*y[j]; k1[i] = h*(power[i]-yt[i]); } } #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j]*(y[j]+0.5*k1[j]); k2[i] = h*(power[i]-yt[i]); } } #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; 
j++) yt[i] += c[i][j]*(y[j]+0.5*k2[j]); k3[i] = h*(power[i]-yt[i]); } } #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) reduction(+:sum) { for (i =0; i < SIZE; i++) { yt[i]=0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j]*(y[j]+k3[j]); k4[i] = h*(power[i]-yt[i]); yout[i] = y[i] + (k1[i] + 2*k2[i] + 2*k3[i] + k4[i])/6.0; sum+=yout[i]; } } // End timers gettimeofday(&finishTime, NULL); //Calculate the interval length timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000 + (double)(finishTime.tv_usec-startTime.tv_usec); timeIntervalLength=timeIntervalLength/1000; //Print the interval lenght printf("__aid_Time: %g msec.\n", timeIntervalLength); printf("\n\nTotalSum=%g\n\n",sum); fflush(stdout); return 0; }
#include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> #define SIZE 9600 struct timeval startTime; struct timeval finishTime; double timeIntervalLength; __sw_global__ double *yt; __sw_global__ double *k1; __sw_global__ double *k2; __sw_global__ double *k3; __sw_global__ double *k4; __sw_global__ double *yout; __sw_global__ double totalSum; __sw_global__ double *y; __sw_global__ double *power; __sw_global__ double **c; __sw_global__ double h; __sw_global__ double sum; __sw_global__ int i, j; void * myMalloc(int size, int info) { void *t = (void *)malloc(size); if (!t) { printf("\nMemory allocation error [%d]", info); fflush(stdout); exit(0); } return t; } int main(int argc, char *argv[]) { h = 0.3154; sum = 0; // //MEMORY ALLOCATION // y = (double *)myMalloc(SIZE * sizeof(double), 1); power = (double *)myMalloc(SIZE * sizeof(double), 2); c = (double **)myMalloc(SIZE * sizeof(double *), 3); for (i = 0; i < SIZE; i++) { c[i] = (double *)myMalloc(SIZE * sizeof(double), 4); } yt = (double *)myMalloc(SIZE * sizeof(double *), 4); k1 = (double *)myMalloc(SIZE * sizeof(double *), 5); k2 = (double *)myMalloc(SIZE * sizeof(double *), 6); k3 = (double *)myMalloc(SIZE * sizeof(double *), 7); k4 = (double *)myMalloc(SIZE * sizeof(double *), 8); yout = (double *)myMalloc(SIZE * sizeof(double *), 9); // //INITIALIZATION // for (i = 0; i < SIZE; i++) { y[i] = i * i; power[i] = i + i; for (j = 0; j < SIZE; j++) { c[i][j] = i * i + j; } } //Start timers gettimeofday(&startTime, NULL); for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * y[j]; k1[i] = h * (power[i] - yt[i]); } for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * (y[j] + 0.5 * k1[j]); k2[i] = h * (power[i] - yt[i]); } for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * (y[j] + 0.5 * k2[j]); k3[i] = h * (power[i] - yt[i]); } for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) 
yt[i] += c[i][j] * (y[j] + k3[j]); k4[i] = h * (power[i] - yt[i]); yout[i] = y[i] + (k1[i] + 2 * k2[i] + 2 * k3[i] + k4[i]) / 6.0; sum += yout[i]; } //End timers gettimeofday(&finishTime, NULL); //Calculate the interval length timeIntervalLength = (double)(finishTime.tv_sec - startTime.tv_sec) * 1000000 +(double)(finishTime.tv_usec - startTime.tv_usec); timeIntervalLength = timeIntervalLength / 1000; //Print the interval lenght printf("__aid_Time: %g msec.\n", timeIntervalLength); printf("\n\nTotalSum=%g\n\n", sum); fflush(stdout); return 0; }
#include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> #define SIZE 9600 struct timeval startTime; struct timeval finishTime; double timeIntervalLength; __sw_global__ double *yt; __sw_global__ double *k1; __sw_global__ double *k2; __sw_global__ double *k3; __sw_global__ double *k4; __sw_global__ double *yout; __sw_global__ double totalSum; __sw_global__ double *y; __sw_global__ double *power; __sw_global__ double **c; __sw_global__ double h; __sw_global__ double sum; __sw_global__ int i, j; void * myMalloc(int size, int info) { void *t = (void *)malloc(size); if (!t) { printf("\nMemory allocation error [%d]", info); fflush(stdout); exit(0); } return t; } int main(int argc, char *argv[]) { h = 0.3154; sum = 0; // //MEMORY ALLOCATION // y = (double *)myMalloc(SIZE * sizeof(double), 1); power = (double *)myMalloc(SIZE * sizeof(double), 2); c = (double **)myMalloc(SIZE * sizeof(double *), 3); for (i = 0; i < SIZE; i++) { c[i] = (double *)myMalloc(SIZE * sizeof(double), 4); } yt = (double *)myMalloc(SIZE * sizeof(double *), 4); k1 = (double *)myMalloc(SIZE * sizeof(double *), 5); k2 = (double *)myMalloc(SIZE * sizeof(double *), 6); k3 = (double *)myMalloc(SIZE * sizeof(double *), 7); k4 = (double *)myMalloc(SIZE * sizeof(double *), 8); yout = (double *)myMalloc(SIZE * sizeof(double *), 9); // //INITIALIZATION // for (i = 0; i < SIZE; i++) { y[i] = i * i; power[i] = i + i; for (j = 0; j < SIZE; j++) { c[i][j] = i * i + j; } } //Start timers gettimeofday(&startTime, NULL); #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * y[j]; k1[i] = h * (power[i] - yt[i]); } } #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * (y[j] + 0.5 * k1[j]); k2[i] = h * (power[i] - yt[i]); } } #pragma omp parallel for schedule(static, 32) 
default(shared) private(i,j) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * (y[j] + 0.5 * k2[j]); k3[i] = h * (power[i] - yt[i]); } } #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) reduction(+:sum) { for (i = 0; i < SIZE; i++) { yt[i] = 0.0; for (j = 0; j < SIZE; j++) yt[i] += c[i][j] * (y[j] + k3[j]); k4[i] = h * (power[i] - yt[i]); yout[i] = y[i] + (k1[i] + 2 * k2[i] + 2 * k3[i] + k4[i]) / 6.0; sum += yout[i]; } } //End timers gettimeofday(&finishTime, NULL); //Calculate the interval length timeIntervalLength = (double)(finishTime.tv_sec - startTime.tv_sec) * 1000000 +(double)(finishTime.tv_usec - startTime.tv_usec); timeIntervalLength = timeIntervalLength / 1000; //Print the interval lenght printf("__aid_Time: %g msec.\n", timeIntervalLength); printf("\n\nTotalSum=%g\n\n", sum); fflush(stdout); return 0; }
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. % */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale[4], translate[4]; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelThreadSet(void **pixels) { ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelThreadSet(const size_t columns,const size_t channels, MagickBooleanType 
highres) { ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) 
cmsGetContextUserData(context); if (cms_exception == (CMSExceptionInfo *) NULL) return; exception=cms_exception->exception; if (exception == (ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } static void TransformDoublePixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { #define GetLCMSPixel(source_info,pixel,index) \ (source_info->scale[index]*((QuantumScale*pixel)+source_info->translate[index])) #define SetLCMSPixel(target_info,pixel,index) \ ClampToQuantum(target_info->scale[index]*((QuantumRange*pixel)+target_info->translate[index])) double *p; ssize_t x; p=(double *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetLCMSPixel(source_info,GetPixelRed(image,q),0); if (source_info->channels > 1) { *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q),1); *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q),2); } if (source_info->channels > 3) *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q),3); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(double *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,SetLCMSPixel(target_info,*p,0),q); else SetPixelRed(image,SetLCMSPixel(target_info,*p,0),q); p++; if 
(target_info->channels > 1) { SetPixelGreen(image,SetLCMSPixel(target_info,*p,1),q); p++; SetPixelBlue(image,SetLCMSPixel(target_info,*p,2),q); p++; } if (target_info->channels > 3) { SetPixelBlack(image,SetLCMSPixel(target_info,*p,3),q); p++; } q+=GetPixelChannels(image); } } static void TransformQuantumPixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { Quantum *p; ssize_t x; p=(Quantum *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetPixelRed(image,q); if (source_info->channels > 1) { *p++=GetPixelGreen(image,q); *p++=GetPixelBlue(image,q); } if (source_info->channels > 3) *p++=GetPixelBlack(image,q); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(Quantum *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,*p++,q); else SetPixelRed(image,*p++,q); if (target_info->channels > 1) { SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); } if (target_info->channels > 3) SetPixelBlack(image,*p++,q); q+=GetPixelChannels(image); } } static inline void SetLCMSInfoTranslate(LCMSInfo *info,const double translate) { info->translate[0]=translate; info->translate[1]=translate; info->translate[2]=translate; info->translate[3]=translate; } static inline void SetLCMSInfoScale(LCMSInfo *info,const double scale) { info->scale[0]=scale; info->scale[1]=scale; info->scale[2]=scale; info->scale[3]=scale; } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 
0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 
0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 
0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 
0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 
0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != 
(cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) { profile=DestroyStringInfo(profile); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { profile=DestroyStringInfo(profile); cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH > 16) { const char *artifact; artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; } #endif SetLCMSInfoScale(&source_info,1.0); SetLCMSInfoTranslate(&source_info,0.0); 
source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&source_info,100.0); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale[0]=100.0; source_info.scale[1]=255.0; source_info.scale[2]=255.0; source_info.translate[1]=(-0.5); source_info.translate[2]=(-0.5); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else 
source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); SetLCMSInfoScale(&target_info,1.0); SetLCMSInfoTranslate(&target_info,0.0); target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&target_info,0.01); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale[0]=0.01; target_info.scale[1]=1/255.0; target_info.scale[2]=1/255.0; target_info.translate[1]=0.5; target_info.translate[2]=0.5; } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case 
cmsSigXYZData: { target_info.colorspace=XYZColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info,flags, cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info, transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info, transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* Mirror the removal into the 8BIM wrapper profile before detaching. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  /* Detach the named profile from the splay tree; caller owns the result. */
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profiles attached: nothing to reset. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
% % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) 
NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; 
p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. 
*/ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static void PatchCorruptProfile(const char *name,StringInfo *profile) { unsigned char *p; size_t length; /* Detect corrupt profiles and if discovered, repair. */ if (LocaleCompare(name,"xmp") == 0) { /* Remove garbage after xpacket end. */ p=GetStringInfoDatum(profile); p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>"); if (p != (unsigned char *) NULL) { p+=19; length=p-GetStringInfoDatum(profile); if (length != GetStringInfoLength(profile)) { *p='\0'; SetStringInfoLength(profile,length); } } return; } if (LocaleCompare(name,"exif") == 0) { /* Check if profile starts with byte order marker instead of Exif. */ p=GetStringInfoDatum(profile); if ((LocaleNCompare((const char *) p,"MM",2) == 0) || (LocaleNCompare((const char *) p,"II",2) == 0)) { const unsigned char profile_start[] = "Exif\0\0"; StringInfo *exif_profile; exif_profile=AcquireStringInfo(6); if (exif_profile != (StringInfo *) NULL) { SetStringInfoDatum(exif_profile,profile_start); ConcatenateStringInfo(exif_profile,profile); SetStringInfoLength(profile,GetStringInfoLength(exif_profile)); SetStringInfo(profile,exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s' (XMP)",image->filename); return(MagickFalse); } xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename); return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; StringInfo *clone_profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); clone_profile=CloneStringInfo(profile); PatchCorruptProfile(name,clone_profile); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse)) { clone_profile=DestroyStringInfo(clone_profile); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),clone_profile); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,clone_profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,clone_profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, 
const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; 
return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 
0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*2.54*65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*2.54*65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. 
*/ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. */ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { 
directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); } static void UpdateClipPath(unsigned char *blob,size_t length, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { ssize_t i; ssize_t knot_count, selector; knot_count=0; while (length != 0) { selector=(ssize_t) ReadProfileMSBShort(&blob,&length); switch (selector) { case 0: case 3: { if (knot_count != 0) { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Expected subpath length record. */ knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length); blob+=22; length-=MagickMin(22,(ssize_t) length); break; } case 1: case 2: case 4: case 5: { if (knot_count == 0) { /* Unexpected subpath knot. 
*/ blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Add sub-path knot */ for (i=0; i < 3; i++) { double x, y; signed int xx, yy; y=(double) ReadProfileMSBLong(&blob,&length); y=y*old_rows/4096.0/4096.0; y-=new_geometry->y; yy=(signed int) ((y*4096*4096)/new_geometry->height); WriteProfileLong(MSBEndian,(size_t) yy,blob-4); x=(double) ReadProfileMSBLong(&blob,&length); x=x*old_columns/4096.0/4096.0; x-=new_geometry->x; xx=(signed int) ((x*4096*4096)/new_geometry->width); WriteProfileLong(MSBEndian,(size_t) xx,blob-4); } knot_count--; break; } case 6: case 7: case 8: default: { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } } } } MagickPrivate void Update8BIMClipPath(const Image *image, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { const StringInfo *profile; size_t length; ssize_t count, id; unsigned char *info; assert(image != (Image *) NULL); assert(new_geometry != (RectangleInfo *) NULL); profile=GetImageProfile(image,"8bim"); if (profile == (StringInfo *) NULL) return; length=GetStringInfoLength(profile); info=GetStringInfoDatum(profile); while (length > 0) { if (ReadProfileByte(&info,&length) != (unsigned char) '8') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'B') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'I') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'M') continue; id=(ssize_t) ReadProfileMSBShort(&info,&length); count=(ssize_t) ReadProfileByte(&info,&length); if ((count != 0) && ((size_t) count <= length)) { info+=count; length-=count; } if ((count & 0x01) == 0) (void) ReadProfileByte(&info,&length); count=(ssize_t) ReadProfileMSBLong(&info,&length); if ((count < 0) || ((size_t) count > length)) { length=0; continue; } if ((id > 1999) && (id < 2999)) UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry); info+=count; length-=MagickMin(count,(ssize_t) length); } }
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
#  if defined(MAGICKCORE_WINDOWS_SUPPORT)
#    if !defined(__MINGW32__)
#      include <win32config.h>
#    endif
#  endif
#  include <libxml/parser.h>
#  include <libxml/tree.h>
#endif

/*
  Forward declarations
*/
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char*,const StringInfo *);

/*
  Typedef declarations
*/
struct _ProfileInfo
{
  char
    *name;      /* profile key, e.g. "icc", "iptc", "8bim" */

  size_t
    length;     /* number of bytes in info */

  unsigned char
    *info;      /* raw profile payload */

  size_t
    signature;  /* structure sanity-check signature */
};

/* Context passed through lcms so its error handler can reach the image and
   exception being processed. */
typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /*
        Replace, rather than merge: any profiles already attached to the
        destination image are destroyed before cloning the source tree.
      */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *))
        CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /*
    A NULL profile removes the matching payload from any embedded 8BIM
    wrapper, keeping it consistent with the profile tree.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.  The
%  returned StringInfo remains owned by the image; do not destroy it.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale[4], translate[4]; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelThreadSet(void **pixels) { ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelThreadSet(const size_t columns,const size_t channels, MagickBooleanType 
highres) { ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) 
cmsGetContextUserData(context);
  /* No usable context: nothing to report into. */
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}

/*
  Apply the per-thread LCMS transform to one row of pixels using double
  precision scratch buffers: gather Quantum components into the source
  buffer (scaled/translated into the ranges the profile expects), run
  cmsDoTransform(), then scatter the result back into the image row.
*/
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  /* Map a Quantum into the [0,1]-normalized, then scaled/offset LCMS range. */
#define GetLCMSPixel(source_info,pixel,index) \
  (source_info->scale[index]*((QuantumScale*pixel)+source_info->translate[index]))
  /* Map an LCMS component back to a clamped Quantum. */
#define SetLCMSPixel(target_info,pixel,index) \
  ClampToQuantum(target_info->scale[index]*((QuantumRange*pixel)+target_info->translate[index]))

  double
    *p;

  ssize_t
    x;

  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q),0);
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q),1);
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q),2);
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q),3);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id],
    (unsigned int) image->columns);
  p=(double *) target_info->pixels[id];
  /* Rewind q to the start of the row before writing the results back. */
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p,0),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p,0),q);
    p++;
    if
(target_info->channels > 1)
      {
        SetPixelGreen(image,SetLCMSPixel(target_info,*p,1),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p,2),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p,3),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}

/*
  Apply the per-thread LCMS transform to one row of pixels using Quantum
  scratch buffers (the non-highres path): gather components unscaled into
  the source buffer, run cmsDoTransform(), then scatter the results back
  into the image row.
*/
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  Quantum
    *p;

  ssize_t
    x;

  p=(Quantum *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetPixelRed(image,q);
    if (source_info->channels > 1)
      {
        *p++=GetPixelGreen(image,q);
        *p++=GetPixelBlue(image,q);
      }
    if (source_info->channels > 3)
      *p++=GetPixelBlack(image,q);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id],
    (unsigned int) image->columns);
  p=(Quantum *) target_info->pixels[id];
  /* Rewind q to the start of the row before writing the results back. */
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*p++,q);
    else
      SetPixelRed(image,*p++,q);
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*p++,q);
        SetPixelBlue(image,*p++,q);
      }
    if (target_info->channels > 3)
      SetPixelBlack(image,*p++,q);
    q+=GetPixelChannels(image);
  }
}

/* Set all four translate factors of an LCMSInfo to the same value. */
static inline void SetLCMSInfoTranslate(LCMSInfo *info,const double translate)
{
  info->translate[0]=translate;
  info->translate[1]=translate;
  info->translate[2]=translate;
  info->translate[3]=translate;
}

/* Set all four scale factors of an LCMSInfo to the same value. */
static inline void SetLCMSInfoScale(LCMSInfo *info,const double scale)
{
  info->scale[0]=scale;
  info->scale[1]=scale;
  info->scale[2]=scale;
  info->scale[3]=scale;
}
#endif

/*
  Associate a built-in sRGB ICC profile (the public-domain profile by
  Graeme W. Gill) with the image.  Returns MagickFalse without changes if
  the image already has an "icc" profile.
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 
0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 
0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 
0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 
0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 
0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != 
(cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) { profile=DestroyStringInfo(profile); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { profile=DestroyStringInfo(profile); cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH > 16) { const char *artifact; artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; } #endif SetLCMSInfoScale(&source_info,1.0); SetLCMSInfoTranslate(&source_info,0.0); 
source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&source_info,100.0); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale[0]=100.0; source_info.scale[1]=255.0; source_info.scale[2]=255.0; source_info.translate[1]=(-0.5); source_info.translate[2]=(-0.5); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else 
source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); SetLCMSInfoScale(&target_info,1.0); SetLCMSInfoTranslate(&target_info,0.0); target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&target_info,0.01); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale[0]=0.01; target_info.scale[1]=1/255.0; target_info.scale[2]=1/255.0; target_info.translate[1]=0.5; target_info.translate[2]=0.5; } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case 
cmsSigXYZData: { target_info.colorspace=XYZColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info,flags, cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info, transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info, transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. 
% % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) 
NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; 
p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. 
*/ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static void PatchCorruptProfile(const char *name,StringInfo *profile) { unsigned char *p; size_t length; /* Detect corrupt profiles and if discovered, repair. */ if (LocaleCompare(name,"xmp") == 0) { /* Remove garbage after xpacket end. */ p=GetStringInfoDatum(profile); p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>"); if (p != (unsigned char *) NULL) { p+=19; length=p-GetStringInfoDatum(profile); if (length != GetStringInfoLength(profile)) { *p='\0'; SetStringInfoLength(profile,length); } } return; } if (LocaleCompare(name,"exif") == 0) { /* Check if profile starts with byte order marker instead of Exif. */ p=GetStringInfoDatum(profile); if ((LocaleNCompare((const char *) p,"MM",2) == 0) || (LocaleNCompare((const char *) p,"II",2) == 0)) { const unsigned char profile_start[] = "Exif\0\0"; StringInfo *exif_profile; exif_profile=AcquireStringInfo(6); if (exif_profile != (StringInfo *) NULL) { SetStringInfoDatum(exif_profile,profile_start); ConcatenateStringInfo(exif_profile,profile); SetStringInfoLength(profile,GetStringInfoLength(exif_profile)); SetStringInfo(profile,exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s' (XMP)",image->filename); return(MagickFalse); } xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename); return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; StringInfo *clone_profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); clone_profile=CloneStringInfo(profile); PatchCorruptProfile(name,clone_profile); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse)) { clone_profile=DestroyStringInfo(clone_profile); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),clone_profile); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,clone_profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,clone_profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, 
const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; 
return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 
0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*2.54*65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*2.54*65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. 
*/ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. */ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { 
directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); } static void UpdateClipPath(unsigned char *blob,size_t length, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { ssize_t i; ssize_t knot_count, selector; knot_count=0; while (length != 0) { selector=(ssize_t) ReadProfileMSBShort(&blob,&length); switch (selector) { case 0: case 3: { if (knot_count != 0) { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Expected subpath length record. */ knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length); blob+=22; length-=MagickMin(22,(ssize_t) length); break; } case 1: case 2: case 4: case 5: { if (knot_count == 0) { /* Unexpected subpath knot. 
*/ blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Add sub-path knot */ for (i=0; i < 3; i++) { double x, y; signed int xx, yy; y=(double) ReadProfileMSBLong(&blob,&length); y=y*old_rows/4096.0/4096.0; y-=new_geometry->y; yy=(signed int) ((y*4096*4096)/new_geometry->height); WriteProfileLong(MSBEndian,(size_t) yy,blob-4); x=(double) ReadProfileMSBLong(&blob,&length); x=x*old_columns/4096.0/4096.0; x-=new_geometry->x; xx=(signed int) ((x*4096*4096)/new_geometry->width); WriteProfileLong(MSBEndian,(size_t) xx,blob-4); } knot_count--; break; } case 6: case 7: case 8: default: { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } } } } MagickPrivate void Update8BIMClipPath(const Image *image, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { const StringInfo *profile; size_t length; ssize_t count, id; unsigned char *info; assert(image != (Image *) NULL); assert(new_geometry != (RectangleInfo *) NULL); profile=GetImageProfile(image,"8bim"); if (profile == (StringInfo *) NULL) return; length=GetStringInfoLength(profile); info=GetStringInfoDatum(profile); while (length > 0) { if (ReadProfileByte(&info,&length) != (unsigned char) '8') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'B') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'I') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'M') continue; id=(ssize_t) ReadProfileMSBShort(&info,&length); count=(ssize_t) ReadProfileByte(&info,&length); if ((count != 0) && ((size_t) count <= length)) { info+=count; length-=count; } if ((count & 0x01) == 0) (void) ReadProfileByte(&info,&length); count=(ssize_t) ReadProfileMSBLong(&info,&length); if ((count < 0) || ((size_t) count > length)) { length=0; continue; } if ((id > 1999) && (id < 2999)) UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry); info+=count; length-=MagickMin(count,(ssize_t) length); } }
/* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e 
s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  /* Nothing to clone: the destination keeps its current profile map. */
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  /*
    Replace the destination's profile map with a deep copy of the clone's:
    keys are duplicated via ConstantString, values via CloneStringInfo.
  */
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profile map at all: report that nothing was deleted. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* Keep any embedded 8BIM wrapper profile in sync with the removal. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile map.
%
%  The format of the DestroyImageProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /* Destroying the splay tree frees every profile via its value destructor. */
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale[4], translate[4]; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelThreadSet(void **pixels) { ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelThreadSet(const size_t columns,const size_t channels, MagickBooleanType 
highres) { ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) 
cmsGetContextUserData(context); if (cms_exception == (CMSExceptionInfo *) NULL) return; exception=cms_exception->exception; if (exception == (ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } static void TransformDoublePixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { #define GetLCMSPixel(source_info,pixel,index) \ (source_info->scale[index]*((QuantumScale*pixel)+source_info->translate[index])) #define SetLCMSPixel(target_info,pixel,index) \ ClampToQuantum(target_info->scale[index]*((QuantumRange*pixel)+target_info->translate[index])) double *p; ssize_t x; p=(double *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetLCMSPixel(source_info,GetPixelRed(image,q),0); if (source_info->channels > 1) { *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q),1); *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q),2); } if (source_info->channels > 3) *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q),3); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(double *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,SetLCMSPixel(target_info,*p,0),q); else SetPixelRed(image,SetLCMSPixel(target_info,*p,0),q); p++; if 
(target_info->channels > 1) { SetPixelGreen(image,SetLCMSPixel(target_info,*p,1),q); p++; SetPixelBlue(image,SetLCMSPixel(target_info,*p,2),q); p++; } if (target_info->channels > 3) { SetPixelBlack(image,SetLCMSPixel(target_info,*p,3),q); p++; } q+=GetPixelChannels(image); } } static void TransformQuantumPixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { Quantum *p; ssize_t x; p=(Quantum *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetPixelRed(image,q); if (source_info->channels > 1) { *p++=GetPixelGreen(image,q); *p++=GetPixelBlue(image,q); } if (source_info->channels > 3) *p++=GetPixelBlack(image,q); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(Quantum *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,*p++,q); else SetPixelRed(image,*p++,q); if (target_info->channels > 1) { SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); } if (target_info->channels > 3) SetPixelBlack(image,*p++,q); q+=GetPixelChannels(image); } } static inline void SetLCMSInfoTranslate(LCMSInfo *info,const double translate) { info->translate[0]=translate; info->translate[1]=translate; info->translate[2]=translate; info->translate[3]=translate; } static inline void SetLCMSInfoScale(LCMSInfo *info,const double scale) { info->scale[0]=scale; info->scale[1]=scale; info->scale[2]=scale; info->scale[3]=scale; } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 
0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 
0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 
0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 
0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 
0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != 
(cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) { profile=DestroyStringInfo(profile); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { profile=DestroyStringInfo(profile); cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH > 16) { const char *artifact; artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; } #endif SetLCMSInfoScale(&source_info,1.0); SetLCMSInfoTranslate(&source_info,0.0); 
source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&source_info,100.0); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale[0]=100.0; source_info.scale[1]=255.0; source_info.scale[2]=255.0; source_info.translate[1]=(-0.5); source_info.translate[2]=(-0.5); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else 
source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); SetLCMSInfoScale(&target_info,1.0); SetLCMSInfoTranslate(&target_info,0.0); target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&target_info,0.01); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale[0]=0.01; target_info.scale[1]=1/255.0; target_info.scale[2]=1/255.0; target_info.translate[1]=0.5; target_info.translate[2]=0.5; } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case 
cmsSigXYZData: { target_info.colorspace=XYZColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info,flags, cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info, transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info, transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. 
% % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) 
NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; 
p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. 
*/ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static void PatchCorruptProfile(const char *name,StringInfo *profile) { unsigned char *p; size_t length; /* Detect corrupt profiles and if discovered, repair. */ if (LocaleCompare(name,"xmp") == 0) { /* Remove garbage after xpacket end. */ p=GetStringInfoDatum(profile); p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>"); if (p != (unsigned char *) NULL) { p+=19; length=p-GetStringInfoDatum(profile); if (length != GetStringInfoLength(profile)) { *p='\0'; SetStringInfoLength(profile,length); } } return; } if (LocaleCompare(name,"exif") == 0) { /* Check if profile starts with byte order marker instead of Exif. */ p=GetStringInfoDatum(profile); if ((LocaleNCompare((const char *) p,"MM",2) == 0) || (LocaleNCompare((const char *) p,"II",2) == 0)) { const unsigned char profile_start[] = "Exif\0\0"; StringInfo *exif_profile; exif_profile=AcquireStringInfo(6); if (exif_profile != (StringInfo *) NULL) { SetStringInfoDatum(exif_profile,profile_start); ConcatenateStringInfo(exif_profile,profile); SetStringInfoLength(profile,GetStringInfoLength(exif_profile)); SetStringInfo(profile,exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s' (XMP)",image->filename); return(MagickFalse); } xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename); return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; StringInfo *clone_profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); clone_profile=CloneStringInfo(profile); PatchCorruptProfile(name,clone_profile); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse)) { clone_profile=DestroyStringInfo(clone_profile); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),clone_profile); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,clone_profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,clone_profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, 
const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; 
return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 
0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*2.54*65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*2.54*65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. 
*/ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. */ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { 
directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); } static void UpdateClipPath(unsigned char *blob,size_t length, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { ssize_t i; ssize_t knot_count, selector; knot_count=0; while (length != 0) { selector=(ssize_t) ReadProfileMSBShort(&blob,&length); switch (selector) { case 0: case 3: { if (knot_count != 0) { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Expected subpath length record. */ knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length); blob+=22; length-=MagickMin(22,(ssize_t) length); break; } case 1: case 2: case 4: case 5: { if (knot_count == 0) { /* Unexpected subpath knot. 
*/ blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Add sub-path knot */ for (i=0; i < 3; i++) { double x, y; signed int xx, yy; y=(double) ReadProfileMSBLong(&blob,&length); y=y*old_rows/4096.0/4096.0; y-=new_geometry->y; yy=(signed int) ((y*4096*4096)/new_geometry->height); WriteProfileLong(MSBEndian,(size_t) yy,blob-4); x=(double) ReadProfileMSBLong(&blob,&length); x=x*old_columns/4096.0/4096.0; x-=new_geometry->x; xx=(signed int) ((x*4096*4096)/new_geometry->width); WriteProfileLong(MSBEndian,(size_t) xx,blob-4); } knot_count--; break; } case 6: case 7: case 8: default: { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } } } } MagickPrivate void Update8BIMClipPath(const Image *image, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { const StringInfo *profile; size_t length; ssize_t count, id; unsigned char *info; assert(image != (Image *) NULL); assert(new_geometry != (RectangleInfo *) NULL); profile=GetImageProfile(image,"8bim"); if (profile == (StringInfo *) NULL) return; length=GetStringInfoLength(profile); info=GetStringInfoDatum(profile); while (length > 0) { if (ReadProfileByte(&info,&length) != (unsigned char) '8') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'B') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'I') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'M') continue; id=(ssize_t) ReadProfileMSBShort(&info,&length); count=(ssize_t) ReadProfileByte(&info,&length); if ((count != 0) && ((size_t) count <= length)) { info+=count; length-=count; } if ((count & 0x01) == 0) (void) ReadProfileByte(&info,&length); count=(ssize_t) ReadProfileMSBLong(&info,&length); if ((count < 0) || ((size_t) count > length)) { length=0; continue; } if ((id > 1999) && (id < 2999)) UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry); info+=count; length-=MagickMin(count,(ssize_t) length); } }
reduction-task-2.c
int v; extern void foo (int); void bar (void) { int i; #pragma omp for reduction (task, +: v) nowait /* { dg-error "'task' reduction modifier on a construct with a 'nowait' clause" } */ for (i = 0; i < 64; i++) foo (i); #pragma omp sections nowait reduction (task, +: v) /* { dg-error "'task' reduction modifier on a construct with a 'nowait' clause" } */ { foo (-2); #pragma omp section foo (-3); } #pragma omp simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) v++; #pragma omp for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp parallel for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp teams distribute parallel for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp taskloop reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) foo (i); #pragma omp taskloop simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp teams reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ foo (i); #pragma omp teams distribute reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct not combined with 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) foo (i); }
int v; extern void foo (int); void bar (void) { int i; for (i = 0; i < 64; i++) foo (i); foo (-2); foo (-3); } for (i = 0; i < 64; i++) v++; for (i = 0; i < 64; i++) v++; for (i = 0; i < 64; i++) v++; for (i = 0; i < 64; i++) v++; for (i = 0; i < 64; i++) foo (i); for (i = 0; i < 64; i++) v++; foo (i); for (i = 0; i < 64; i++) foo (i); }
int v; extern void foo(int); void bar(void) { int i; #pragma omp for reduction (task, +: v) nowait /* { dg-error "'task' * reduction modifier on a * construct with a 'nowait' * clause" } */ for (i = 0; i < 64; i++) foo(i); #pragma omp sections nowait reduction (task, +: v) /* { dg-error "'task' * reduction modifier on * a construct with a * 'nowait' clause" } */ { foo(-2); #pragma omp section foo(-3); } #pragma omp simd reduction (task, +: v) /* { dg-error "invalid 'task' * reduction modifier on construct * other than 'parallel', 'for' or * 'sections'" } */ for (i = 0; i < 64; i++) v++; #pragma omp for simd reduction (task, +: v) /* { dg-error "invalid 'task' * reduction modifier on * construct combined with * 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp parallel for simd reduction (task, +: v) /* { dg-error "invalid * 'task' reduction * modifier on construct * combined with 'simd'" * } */ for (i = 0; i < 64; i++) v++; #pragma omp teams distribute parallel for simd reduction (task, +: v) /* { dg-error "invalid * 'task' reduction * modifier on construct * combined with 'simd'" * } */ for (i = 0; i < 64; i++) v++; #pragma omp taskloop reduction (task, +: v) /* { dg-error "invalid 'task' * reduction modifier on * construct other than * 'parallel', 'for' or * 'sections'" } */ for (i = 0; i < 64; i++) foo(i); #pragma omp taskloop simd reduction (task, +: v) /* { dg-error "invalid * 'task' reduction * modifier on construct * combined with 'simd'" * } */ for (i = 0; i < 64; i++) v++; #pragma omp teams reduction (task, +: v) /* { dg-error "invalid 'task' * reduction modifier on * construct other than * 'parallel', 'for' or * 'sections'" } */ foo(i); #pragma omp teams distribute reduction (task, +: v) /* { dg-error "invalid * 'task' reduction * modifier on construct * not combined with * 'parallel', 'for' or * 'sections'" } */ for (i = 0; i < 64; i++) foo(i); }
multiple_2dvariables_simple_omp.c
#include <stdlib.h> #include <stdio.h> #include <omp.h> int main() { int** a = (int**)malloc(sizeof(int*)); int** b = (int**)malloc(sizeof(int*)); a[0] = (int*)malloc(sizeof(int)*4); b[0] = (int*)malloc(sizeof(int)*4); a[0][0] = 0; b[0][0] = 0; #pragma omp parallel { a[0][0] = 42; b[0][0] = a[0][0]; } printf("%d\n", b[0][0]); free(a[0]); free(b[0]); free(a); free(b); }
#include <stdlib.h> #include <stdio.h> #include <omp.h> int main() { int **a = (int **)malloc(sizeof(int *)); int **b = (int **)malloc(sizeof(int *)); a[0] = (int *)malloc(sizeof(int) * 4); b[0] = (int *)malloc(sizeof(int) * 4); a[0][0] = 0; b[0][0] = 0; a[0][0] = 42; b[0][0] = a[0][0]; printf("%d\n", b[0][0]); free(a[0]); free(b[0]); free(a); free(b); }
#include <stdlib.h> #include <stdio.h> #include <omp.h> int main() { int **a = (int **)malloc(sizeof(int *)); int **b = (int **)malloc(sizeof(int *)); a[0] = (int *)malloc(sizeof(int) * 4); b[0] = (int *)malloc(sizeof(int) * 4); a[0][0] = 0; b[0][0] = 0; #pragma omp parallel { a[0][0] = 42; b[0][0] = a[0][0]; } printf("%d\n", b[0][0]); free(a[0]); free(b[0]); free(a); free(b); }
GB_unop__exp2_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__exp2_fp32_fp32 // op(A') function: GB_unop_tran__exp2_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = exp2f (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = exp2f (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = exp2f (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP2 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__exp2_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( 
GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__exp2_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__exp2_fp32_fp32 // op(A') function: GB_unop_tran__exp2_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = exp2f (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = exp2f (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = exp2f (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP2 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__exp2_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( 
GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__exp2_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__exp2_fp32_fp32 // op(A') function: GB_unop_tran__exp2_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = exp2f (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = exp2f (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = exp2f (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP2 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__exp2_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( 
GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = exp2f (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__exp2_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
donde.c
/* donde.c * A hybrid MPI / OpenMP program that reports the CPU where each thread * of each rank is executing. Used to assist in determining correct * binding behavior. * Rory Kelly * 3 May 2017 */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <sched.h> #include <unistd.h> #include <mpi.h> #include <omp.h> int main(int argc, char **argv){ int mpi_id; // MPI Task ID int n_mpi; // Number of MPI Tasks int omp_id; // OpenMP Thread ID int n_omp; // Number of OpenMP threads int my_cpu; // CPU # where task/thread is executing int mpi_tsup_lev; // provided level of MPI thread support char thrd_str[80]; // the per-thread output string char node_name[80]; // The node where process / thread is executing int length; // Length of returned string MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &mpi_tsup_lev); MPI_Comm_size(MPI_COMM_WORLD, &n_mpi); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_id); MPI_Get_processor_name(node_name, &length); // Print MPI Rank and OpenMP thread info in-order for readability for(int j=0; j<n_mpi; j++){ if(j == mpi_id){ #pragma omp parallel private(omp_id, n_omp, my_cpu, thrd_str) { omp_id = omp_get_thread_num(); n_omp = omp_get_num_threads(); my_cpu = sched_getcpu(); if (omp_id == 0){ sprintf(thrd_str, "MPI Task %2d, OpenMP thread %d of %d (cpu %d)", mpi_id, omp_id, n_omp, my_cpu); } else { sprintf(thrd_str, " OpenMP thread %d of %d (cpu %d)", omp_id, n_omp, my_cpu); } #pragma omp for ordered schedule(static, 1) for(int i=0; i<n_omp; i++){ #pragma omp ordered { puts(thrd_str); } } } } MPI_Barrier(MPI_COMM_WORLD); } return MPI_Finalize(); }
/* * donde.c A hybrid MPI / OpenMP program that reports the CPU where each * thread of each rank is executing. Used to assist in determining correct * binding behavior. Rory Kelly 3 May 2017 */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <sched.h> #include <unistd.h> #include <mpi.h> #include <omp.h> int main(int argc, char **argv) { int mpi_id; //MPI Task ID int n_mpi; //Number of MPI Tasks int omp_id; //OpenMP Thread ID int n_omp; //Number of OpenMP threads int my_cpu; //CPU #where task/thread is executing int mpi_tsup_lev; //provided level of MPI thread support char thrd_str[80]; //the per - thread output string char node_name[80]; //The node where process / thread is executing int length; //Length of returned string MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &mpi_tsup_lev); MPI_Comm_size(MPI_COMM_WORLD, &n_mpi); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_id); MPI_Get_processor_name(node_name, &length); //Print MPI Rank and OpenMP thread info in - order for readability for (int j = 0; j < n_mpi; j++) { if (j == mpi_id) { omp_id = omp_get_thread_num(); n_omp = omp_get_num_threads(); my_cpu = sched_getcpu(); if (omp_id == 0) { sprintf(thrd_str, "MPI Task %2d, OpenMP thread %d of %d (cpu %d)", mpi_id, omp_id, n_omp, my_cpu); } else { sprintf(thrd_str, " OpenMP thread %d of %d (cpu %d)", omp_id, n_omp, my_cpu); } for (int i = 0; i < n_omp; i++) { puts(thrd_str); } } MPI_Barrier(MPI_COMM_WORLD); } return MPI_Finalize(); }
/* * donde.c A hybrid MPI / OpenMP program that reports the CPU where each * thread of each rank is executing. Used to assist in determining correct * binding behavior. Rory Kelly 3 May 2017 */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <sched.h> #include <unistd.h> #include <mpi.h> #include <omp.h> int main(int argc, char **argv) { int mpi_id; //MPI Task ID int n_mpi; //Number of MPI Tasks int omp_id; //OpenMP Thread ID int n_omp; //Number of OpenMP threads int my_cpu; //CPU #where task/thread is executing int mpi_tsup_lev; //provided level of MPI thread support char thrd_str[80]; //the per - thread output string char node_name[80]; //The node where process / thread is executing int length; //Length of returned string MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &mpi_tsup_lev); MPI_Comm_size(MPI_COMM_WORLD, &n_mpi); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_id); MPI_Get_processor_name(node_name, &length); //Print MPI Rank and OpenMP thread info in - order for readability for (int j = 0; j < n_mpi; j++) { if (j == mpi_id) { #pragma omp parallel private(omp_id, n_omp, my_cpu, thrd_str) { omp_id = omp_get_thread_num(); n_omp = omp_get_num_threads(); my_cpu = sched_getcpu(); if (omp_id == 0) { sprintf(thrd_str, "MPI Task %2d, OpenMP thread %d of %d (cpu %d)", mpi_id, omp_id, n_omp, my_cpu); } else { sprintf(thrd_str, " OpenMP thread %d of %d (cpu %d)", omp_id, n_omp, my_cpu); } #pragma omp for ordered schedule(static, 1) for (int i = 0; i < n_omp; i++) { #pragma omp ordered { puts(thrd_str); } } } } MPI_Barrier(MPI_COMM_WORLD); } return MPI_Finalize(); }
matrix_sum_omp.c
/* --- File matrix_sum_omp.c --- */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> int main(int argc, char **argv) { struct timespec ts_start, ts_end; int size = 1e4; int **a, *c; int i, j; float time_total; /* Allocate memory */ c = malloc(size * sizeof(int)); a = (int **)malloc(size * sizeof(int *)); for (i = 0; i < size; i++) a[i] = malloc(size * sizeof(int)); /* Set all matrix elements to 1 */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a[i][j] = 1; } } /* Zero the accumulator */ for (i = 0; i < size; i++) { c[i] = 0; } clock_gettime(CLOCK_MONOTONIC, &ts_start); #pragma omp parallel for /* Each thread sums one column */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { c[i] += a[i][j]; } } int total = 0; /* Add sums of all columns together */ for (i = 0; i < size; i++) { total += c[i]; } clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("Total is %d, time is %f ms\n", total, time_total / 1e6); }
/* --- File matrix_sum_omp.c --- */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> int main(int argc, char **argv) { struct timespec ts_start, ts_end; int size = 1e4; int **a, *c; int i, j; float time_total; /* Allocate memory */ c = malloc(size * sizeof(int)); a = (int **)malloc(size * sizeof(int *)); for (i = 0; i < size; i++) a[i] = malloc(size * sizeof(int)); /* Set all matrix elements to 1 */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a[i][j] = 1; } } /* Zero the accumulator */ for (i = 0; i < size; i++) { c[i] = 0; } clock_gettime(CLOCK_MONOTONIC, &ts_start); /* Each thread sums one column */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { c[i] += a[i][j]; } } int total = 0; /* Add sums of all columns together */ for (i = 0; i < size; i++) { total += c[i]; } clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("Total is %d, time is %f ms\n", total, time_total / 1e6); }
/* --- File matrix_sum_omp.c --- */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> int main(int argc, char **argv) { struct timespec ts_start, ts_end; int size = 1e4; int **a, *c; int i, j; float time_total; /* Allocate memory */ c = malloc(size * sizeof(int)); a = (int **)malloc(size * sizeof(int *)); for (i = 0; i < size; i++) a[i] = malloc(size * sizeof(int)); /* Set all matrix elements to 1 */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a[i][j] = 1; } } /* Zero the accumulator */ for (i = 0; i < size; i++) { c[i] = 0; } clock_gettime(CLOCK_MONOTONIC, &ts_start); #pragma omp parallel for /* Each thread sums one column */ for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { c[i] += a[i][j]; } } int total = 0; /* Add sums of all columns together */ for (i = 0; i < size; i++) { total += c[i]; } clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("Total is %d, time is %f ms\n", total, time_total / 1e6); }
GB_unaryop__ainv_fp32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_fp32_int8 // op(A') function: GB_tran__ainv_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_fp32_int8 ( float *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_fp32_int8 // op(A') function: GB_tran__ainv_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_fp32_int8 ( float *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_fp32_int8 // op(A') function: GB_tran__ainv_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_fp32_int8 ( float *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__bxnor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_int16 // A.*B function (eWiseMult): GB_AemultB__bxnor_int16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxnor_int16 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int16 // C=scalar+B GB_bind1st__bxnor_int16 // C=scalar+B' GB_bind1st_tran__bxnor_int16 // C=A+scalar GB_bind2nd__bxnor_int16 // C=A'+scalar GB_bind2nd_tran__bxnor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxnor_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar 
bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxnor_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB_bind1st_tran__bxnor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB_bind2nd_tran__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_int16 // A.*B function (eWiseMult): GB_AemultB__bxnor_int16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxnor_int16 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int16 // C=scalar+B GB_bind1st__bxnor_int16 // C=scalar+B' GB_bind1st_tran__bxnor_int16 // C=A+scalar GB_bind2nd__bxnor_int16 // C=A'+scalar GB_bind2nd_tran__bxnor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxnor_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxnor_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB_bind1st_tran__bxnor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB_bind2nd_tran__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_int16 // A.*B function (eWiseMult): GB_AemultB__bxnor_int16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxnor_int16 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int16 // C=scalar+B GB_bind1st__bxnor_int16 // C=scalar+B' GB_bind1st_tran__bxnor_int16 // C=A+scalar GB_bind2nd__bxnor_int16 // C=A'+scalar GB_bind2nd_tran__bxnor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxnor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxnor_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar 
bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxnor_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB_bind1st_tran__bxnor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB_bind2nd_tran__bxnor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
devito-kernel-omp-trivial.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" struct dataobj { void *restrict data; int * size; int * npsize; int * dsize; int * hsize; int * hofs; int * oofs; } ; struct profiler { double section0; } ; int Kernel(struct dataobj *restrict v_vec, const int time_M, const int time_m, struct profiler * timers, const int x_M, const int x_m, const int y_M, const int y_m) { float (*restrict v)[v_vec->size[1]][v_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]]) v_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); #pragma omp target data map(tofrom: v[time_m:time_M][x_m:x_M][y_m:y_M]) for (int time = time_m, t0 = (time)%(2), t1 = (time + 1)%(2); time <= time_M; time += 1, t0 = (time)%(2), t1 = (time + 1)%(2)) { struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp target #pragma omp parallel for for (int x = x_m; x <= x_M; x += 1) { for (int y = y_m; y <= y_M; y += 1) { v[t1][x + 1][y + 1] = v[t0][x + 1][y + 1] + 1; } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000; } return 0; }
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict v_vec, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m) { float (*restrict v)[v_vec->size[1]][v_vec->size[2]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]])v_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); for (int time = time_m, t0 = (time) % (2), t1 = (time + 1) % (2); time <= time_M; time += 1, t0 = (time) % (2), t1 = (time + 1) % (2)) { struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ for (int x = x_m; x <= x_M; x += 1) { for (int y = y_m; y <= y_M; y += 1) { v[t1][x + 1][y + 1] = v[t0][x + 1][y + 1] + 1; } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; } return 0; }
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict v_vec, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m) { float (*restrict v)[v_vec->size[1]][v_vec->size[2]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]])v_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); #pragma omp target data map(tofrom: v[time_m:time_M][x_m:x_M][y_m:y_M]) for (int time = time_m, t0 = (time) % (2), t1 = (time + 1) % (2); time <= time_M; time += 1, t0 = (time) % (2), t1 = (time + 1) % (2)) { struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp target #pragma omp parallel for for (int x = x_m; x <= x_M; x += 1) { for (int y = y_m; y <= y_M; y += 1) { v[t1][x + 1][y + 1] = v[t0][x + 1][y + 1] + 1; } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; } return 0; }
GB_unaryop__abs_int32_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_int64 // op(A') function: GB_tran__abs_int32_int64 // C type: int32_t // A type: int64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_int64 ( int32_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, 
p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_int64 // op(A') function: GB_tran__abs_int32_int64 // C type: int32_t // A type: int64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_int64 ( int32_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_int64 // op(A') function: GB_tran__abs_int32_int64 // C type: int32_t // A type: int64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_int64 ( int32_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, 
p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
_kdtree_core.c
/* pykdtree, Fast kd-tree implementation with OpenMP-enabled queries Copyright (C) 2013 - present Esben S. Nielsen This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* This kd-tree implementation is based on the scipy.spatial.cKDTree by Anne M. Archibald and libANN by David M. Mount and Sunil Arya. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <float.h> #define PA(i,d) (pa[no_dims * pidx[i] + d]) #define PASWAP(a,b) { uint32_t tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; } #ifdef _MSC_VER #define restrict __restrict #endif typedef struct { float cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; float cut_bounds_lv; float cut_bounds_hv; struct Node_float *left_child; struct Node_float *right_child; } Node_float; typedef struct { float *bbox; int8_t no_dims; uint32_t *pidx; struct Node_float *root; } Tree_float; typedef struct { double cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; double cut_bounds_lv; double cut_bounds_hv; struct Node_double *left_child; struct Node_double *right_child; } Node_double; typedef struct { double *bbox; int8_t no_dims; uint32_t *pidx; struct Node_double *root; } Tree_double; void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k); void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox); int partition_float(float *pa, uint32_t 
*pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim, float *cut_val, uint32_t *n_lo); Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox); Node_float * create_node_float(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_float(Node_float *root); void delete_tree_float(Tree_float *tree); void print_tree_float(Node_float *root, int level); float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims); float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox); float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox); void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist); void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, float *restrict closest_dist); void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask, uint32_t * closest_idx, float *closest_dist); void search_tree_float(Tree_float *tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists); void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k); void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox); int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t 
start_idx, uint32_t n, double *bbox, int8_t *cut_dim, double *cut_val, uint32_t *n_lo); Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox); Node_double * create_node_double(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_double(Node_double *root); void delete_tree_double(Tree_double *tree); void print_tree_double(Node_double *root, int level); double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims); double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox); double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox); void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist); void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, double *restrict closest_dist); void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord, double min_dist, uint32_t k, double distance_upper_bound, double eps_fac, uint8_t *mask, uint32_t * closest_idx, double *closest_dist); void search_tree_double(Tree_double *tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists); /************************************************ Insert point into priority queue Params: closest_idx : index queue closest_dist : distance queue pidx : permutation index of data points cur_dist : distance to point inserted k : number of neighbours 
************************************************/ void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k) { int i; for (i = k - 1; i > 0; i--) { if (closest_dist[i - 1] > cur_dist) { closest_dist[i] = closest_dist[i - 1]; closest_idx[i] = closest_idx[i - 1]; } else { break; } } closest_idx[i] = pidx; closest_dist[i] = cur_dist; } /************************************************ Get the bounding box of a set of points Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions n : number of points bbox : bounding box (return) ************************************************/ void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox) { float cur; int8_t bbox_idx, i, j; uint32_t i2; /* Use first data point to initialize */ for (i = 0; i < no_dims; i++) { bbox[2 * i] = bbox[2 * i + 1] = PA(0, i); } /* Update using rest of data points */ for (i2 = 1; i2 < n; i2++) { for (j = 0; j < no_dims; j++) { bbox_idx = 2 * j; cur = PA(i2, j); if (cur < bbox[bbox_idx]) { bbox[bbox_idx] = cur; } else if (cur > bbox[bbox_idx + 1]) { bbox[bbox_idx + 1] = cur; } } } } /************************************************ Partition a range of data points by manipulation the permutation index. The sliding midpoint rule is used for the partitioning. 
Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bbox : bounding box of data points cut_dim : dimension used for partition (return) cut_val : value of cutting point (return) n_lo : number of point below cutting plane (return) ************************************************/ int partition_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim, float *cut_val, uint32_t *n_lo) { int8_t dim = 0, i; uint32_t p, q, i2; float size = 0, min_val, max_val, split, side_len, cur_val; uint32_t end_idx = start_idx + n - 1; /* Find largest bounding box side */ for (i = 0; i < no_dims; i++) { side_len = bbox[2 * i + 1] - bbox[2 * i]; if (side_len > size) { dim = i; size = side_len; } } min_val = bbox[2 * dim]; max_val = bbox[2 * dim + 1]; /* Check for zero length or inconsistent */ if (min_val >= max_val) return 1; /* Use middle for splitting */ split = (min_val + max_val) / 2; /* Partition all data points around middle */ p = start_idx; q = end_idx; while (p <= q) { if (PA(p, dim) < split) { p++; } else if (PA(q, dim) >= split) { /* Guard for underflow */ if (q > 0) { q--; } else { break; } } else { PASWAP(p, q); p++; q--; } } /* Check for empty splits */ if (p == start_idx) { /* No points less than split. Split at lowest point instead. Minimum 1 point will be in lower box. */ uint32_t j = start_idx; split = PA(j, dim); for (i2 = start_idx + 1; i2 <= end_idx; i2++) { /* Find lowest point */ cur_val = PA(i2, dim); if (cur_val < split) { j = i2; split = cur_val; } } PASWAP(j, start_idx); p = start_idx + 1; } else if (p == end_idx + 1) { /* No points greater than split. Split at highest point instead. Minimum 1 point will be in higher box. 
*/ uint32_t j = end_idx; split = PA(j, dim); for (i2 = start_idx; i2 < end_idx; i2++) { /* Find highest point */ cur_val = PA(i2, dim); if (cur_val > split) { j = i2; split = cur_val; } } PASWAP(j, end_idx); p = end_idx; } /* Set return values */ *cut_dim = dim; *cut_val = split; *n_lo = p - start_idx; return 0; } /************************************************ Construct a sub tree over a range of data points. Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bsp : number of points per leaf bbox : bounding box of set of data points ************************************************/ Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox) { /* Create new node */ int is_leaf = (n <= bsp); Node_float *root = create_node_float(start_idx, n, is_leaf); int rval; int8_t cut_dim; uint32_t n_lo; float cut_val, lv, hv; if (is_leaf) { /* Make leaf node */ root->cut_dim = -1; } else { /* Make split node */ /* Partition data set and set node info */ rval = partition_float(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo); if (rval == 1) { root->cut_dim = -1; return root; } root->cut_val = cut_val; root->cut_dim = cut_dim; /* Recurse on both subsets */ lv = bbox[2 * cut_dim]; hv = bbox[2 * cut_dim + 1]; /* Set bounds for cut dimension */ root->cut_bounds_lv = lv; root->cut_bounds_hv = hv; /* Update bounding box before call to lower subset and restore after */ bbox[2 * cut_dim + 1] = cut_val; root->left_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox); bbox[2 * cut_dim + 1] = hv; /* Update bounding box before call to higher subset and restore after */ bbox[2 * cut_dim] = cut_val; root->right_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox); bbox[2 * cut_dim] 
= lv; } return root; } /************************************************ Construct a tree over data points. Params: pa : data points no_dims: number of dimensions n : number of data points bsp : number of points per leaf ************************************************/ Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp) { Tree_float *tree = (Tree_float *)malloc(sizeof(Tree_float)); uint32_t i; uint32_t *pidx; float *bbox; tree->no_dims = no_dims; /* Initialize permutation array */ pidx = (uint32_t *)malloc(sizeof(uint32_t) * n); for (i = 0; i < n; i++) { pidx[i] = i; } bbox = (float *)malloc(2 * sizeof(float) * no_dims); get_bounding_box_float(pa, pidx, no_dims, n, bbox); tree->bbox = bbox; /* Construct subtree on full dataset */ tree->root = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, 0, n, bsp, bbox); tree->pidx = pidx; return tree; } /************************************************ Create a tree node. Params: start_idx : index of first data point to use n : number of data points ************************************************/ Node_float* create_node_float(uint32_t start_idx, uint32_t n, int is_leaf) { Node_float *new_node; if (is_leaf) { /* Allocate only the part of the struct that will be used in a leaf node. This relies on the C99 specification of struct layout conservation and padding and that dereferencing is never attempted for the node pointers in a leaf. 
*/ new_node = (Node_float *)malloc(sizeof(Node_float) - 2 * sizeof(Node_float *)); } else { new_node = (Node_float *)malloc(sizeof(Node_float)); } new_node->n = n; new_node->start_idx = start_idx; return new_node; } /************************************************ Delete subtree Params: root : root node of subtree to delete ************************************************/ void delete_subtree_float(Node_float *root) { if (root->cut_dim != -1) { delete_subtree_float((Node_float *)root->left_child); delete_subtree_float((Node_float *)root->right_child); } free(root); } /************************************************ Delete tree Params: tree : Tree struct of kd tree ************************************************/ void delete_tree_float(Tree_float *tree) { delete_subtree_float((Node_float *)tree->root); free(tree->bbox); free(tree->pidx); free(tree); } /************************************************ Print ************************************************/ void print_tree_float(Node_float *root, int level) { int i; for (i = 0; i < level; i++) { printf(" "); } printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim); if (root->cut_dim != -1) print_tree_float((Node_float *)root->left_child, level + 1); if (root->cut_dim != -1) print_tree_float((Node_float *)root->right_child, level + 1); } /************************************************ Calculate squared cartesian distance between points Params: point1_coord : point 1 point2_coord : point 2 ************************************************/ float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims) { /* Calculate squared distance */ float dist = 0, dim_dist; int8_t i; for (i = 0; i < no_dims; i++) { dim_dist = point2_coord[i] - point1_coord[i]; dist += dim_dist * dim_dist; } return dist; } /************************************************ Get squared distance from point to cube in specified dimension Params: dim : dimension point_coord : cartesian coordinates of point bbox : cube 
************************************************/ float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox) { float dim_coord = point_coord[dim]; if (dim_coord < bbox[2 * dim]) { /* Left of cube in dimension */ return dim_coord - bbox[2 * dim]; } else if (dim_coord > bbox[2 * dim + 1]) { /* Right of cube in dimension */ return dim_coord - bbox[2 * dim + 1]; } else { /* Inside cube in dimension */ return 0.; } } /************************************************ Get minimum squared distance between point and cube. Params: point_coord : cartesian coordinates of point no_dims : number of dimensions bbox : cube ************************************************/ float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox) { float cube_offset = 0, cube_offset_dim; int8_t i; for (i = 0; i < no_dims; i++) { cube_offset_dim = get_cube_offset_float(i, point_coord, bbox); cube_offset += cube_offset_dim * cube_offset_dim; } return cube_offset; } /************************************************ Search a leaf node for closest point Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], 
cur_dist, k); } } } /************************************************ Search a leaf node for closest point with data point mask Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t *mask, uint32_t *restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Is this point masked out? */ if (mask[pidx[start_idx + i]]) { continue; } /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search subtree for nearest to query point Params: root : root node of subtree pa : data points pidx : permutation index of data points no_dims : number of dimensions point_coord : query point min_dist : minumum distance to nearest neighbour mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask, uint32_t *closest_idx, float 
*closest_dist) { int8_t dim; float dist_left, dist_right; float new_offset; float box_diff; /* Skip if distance bound exeeded */ if (min_dist > distance_upper_bound) { return; } dim = root->cut_dim; /* Handle leaf node */ if (dim == -1) { if (mask) { search_leaf_float_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist); } else { search_leaf_float(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist); } return; } /* Get distance to cutting plane */ new_offset = point_coord[dim] - root->cut_val; if (new_offset < 0) { /* Left of cutting plane */ dist_left = min_dist; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Right of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = root->cut_bounds_lv - point_coord[dim]; if (box_diff < 0) { box_diff = 0; } dist_right = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } else { /* Right of cutting plane */ dist_right = min_dist; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Left of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. 
*/ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_float(Tree_float *tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists) { float min_dist; float eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; float *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; #if defined(_MSC_VER) && defined(_OPENMP) int32_t i = 0; int32_t local_num_points = (int32_t) num_points; #else uint32_t i; uint32_t local_num_points = num_points; #endif Node_float *root = (Node_float *)tree->root; /* Queries are OpenMP enabled */ #pragma omp parallel { /* The low chunk size is important to avoid L2 cache trashing for spatial coherent query datasets */ #pragma omp for private(i, j) schedule(static, 100) nowait for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = get_min_dist_float(point_coords + no_dims * i, no_dims, bbox); search_splitnode_float(root, pa, pidx, no_dims, 
point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } } } /************************************************ Insert point into priority queue Params: closest_idx : index queue closest_dist : distance queue pidx : permutation index of data points cur_dist : distance to point inserted k : number of neighbours ************************************************/ void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k) { int i; for (i = k - 1; i > 0; i--) { if (closest_dist[i - 1] > cur_dist) { closest_dist[i] = closest_dist[i - 1]; closest_idx[i] = closest_idx[i - 1]; } else { break; } } closest_idx[i] = pidx; closest_dist[i] = cur_dist; } /************************************************ Get the bounding box of a set of points Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions n : number of points bbox : bounding box (return) ************************************************/ void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox) { double cur; int8_t bbox_idx, i, j; uint32_t i2; /* Use first data point to initialize */ for (i = 0; i < no_dims; i++) { bbox[2 * i] = bbox[2 * i + 1] = PA(0, i); } /* Update using rest of data points */ for (i2 = 1; i2 < n; i2++) { for (j = 0; j < no_dims; j++) { bbox_idx = 2 * j; cur = PA(i2, j); if (cur < bbox[bbox_idx]) { bbox[bbox_idx] = cur; } else if (cur > bbox[bbox_idx + 1]) { bbox[bbox_idx + 1] = cur; } } } } /************************************************ Partition a range of data points by manipulation the permutation index. The sliding midpoint rule is used for the partitioning. 
Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bbox : bounding box of data points cut_dim : dimension used for partition (return) cut_val : value of cutting point (return) n_lo : number of point below cutting plane (return) ************************************************/ int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t *cut_dim, double *cut_val, uint32_t *n_lo) { int8_t dim = 0, i; uint32_t p, q, i2; double size = 0, min_val, max_val, split, side_len, cur_val; uint32_t end_idx = start_idx + n - 1; /* Find largest bounding box side */ for (i = 0; i < no_dims; i++) { side_len = bbox[2 * i + 1] - bbox[2 * i]; if (side_len > size) { dim = i; size = side_len; } } min_val = bbox[2 * dim]; max_val = bbox[2 * dim + 1]; /* Check for zero length or inconsistent */ if (min_val >= max_val) return 1; /* Use middle for splitting */ split = (min_val + max_val) / 2; /* Partition all data points around middle */ p = start_idx; q = end_idx; while (p <= q) { if (PA(p, dim) < split) { p++; } else if (PA(q, dim) >= split) { /* Guard for underflow */ if (q > 0) { q--; } else { break; } } else { PASWAP(p, q); p++; q--; } } /* Check for empty splits */ if (p == start_idx) { /* No points less than split. Split at lowest point instead. Minimum 1 point will be in lower box. */ uint32_t j = start_idx; split = PA(j, dim); for (i2 = start_idx + 1; i2 <= end_idx; i2++) { /* Find lowest point */ cur_val = PA(i2, dim); if (cur_val < split) { j = i2; split = cur_val; } } PASWAP(j, start_idx); p = start_idx + 1; } else if (p == end_idx + 1) { /* No points greater than split. Split at highest point instead. Minimum 1 point will be in higher box. 
*/ uint32_t j = end_idx; split = PA(j, dim); for (i2 = start_idx; i2 < end_idx; i2++) { /* Find highest point */ cur_val = PA(i2, dim); if (cur_val > split) { j = i2; split = cur_val; } } PASWAP(j, end_idx); p = end_idx; } /* Set return values */ *cut_dim = dim; *cut_val = split; *n_lo = p - start_idx; return 0; } /************************************************ Construct a sub tree over a range of data points. Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bsp : number of points per leaf bbox : bounding box of set of data points ************************************************/ Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox) { /* Create new node */ int is_leaf = (n <= bsp); Node_double *root = create_node_double(start_idx, n, is_leaf); int rval; int8_t cut_dim; uint32_t n_lo; double cut_val, lv, hv; if (is_leaf) { /* Make leaf node */ root->cut_dim = -1; } else { /* Make split node */ /* Partition data set and set node info */ rval = partition_double(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo); if (rval == 1) { root->cut_dim = -1; return root; } root->cut_val = cut_val; root->cut_dim = cut_dim; /* Recurse on both subsets */ lv = bbox[2 * cut_dim]; hv = bbox[2 * cut_dim + 1]; /* Set bounds for cut dimension */ root->cut_bounds_lv = lv; root->cut_bounds_hv = hv; /* Update bounding box before call to lower subset and restore after */ bbox[2 * cut_dim + 1] = cut_val; root->left_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox); bbox[2 * cut_dim + 1] = hv; /* Update bounding box before call to higher subset and restore after */ bbox[2 * cut_dim] = cut_val; root->right_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox); bbox[2 
* cut_dim] = lv; } return root; } /************************************************ Construct a tree over data points. Params: pa : data points no_dims: number of dimensions n : number of data points bsp : number of points per leaf ************************************************/ Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp) { Tree_double *tree = (Tree_double *)malloc(sizeof(Tree_double)); uint32_t i; uint32_t *pidx; double *bbox; tree->no_dims = no_dims; /* Initialize permutation array */ pidx = (uint32_t *)malloc(sizeof(uint32_t) * n); for (i = 0; i < n; i++) { pidx[i] = i; } bbox = (double *)malloc(2 * sizeof(double) * no_dims); get_bounding_box_double(pa, pidx, no_dims, n, bbox); tree->bbox = bbox; /* Construct subtree on full dataset */ tree->root = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, 0, n, bsp, bbox); tree->pidx = pidx; return tree; } /************************************************ Create a tree node. Params: start_idx : index of first data point to use n : number of data points ************************************************/ Node_double* create_node_double(uint32_t start_idx, uint32_t n, int is_leaf) { Node_double *new_node; if (is_leaf) { /* Allocate only the part of the struct that will be used in a leaf node. This relies on the C99 specification of struct layout conservation and padding and that dereferencing is never attempted for the node pointers in a leaf. 
*/ new_node = (Node_double *)malloc(sizeof(Node_double) - 2 * sizeof(Node_double *)); } else { new_node = (Node_double *)malloc(sizeof(Node_double)); } new_node->n = n; new_node->start_idx = start_idx; return new_node; } /************************************************ Delete subtree Params: root : root node of subtree to delete ************************************************/ void delete_subtree_double(Node_double *root) { if (root->cut_dim != -1) { delete_subtree_double((Node_double *)root->left_child); delete_subtree_double((Node_double *)root->right_child); } free(root); } /************************************************ Delete tree Params: tree : Tree struct of kd tree ************************************************/ void delete_tree_double(Tree_double *tree) { delete_subtree_double((Node_double *)tree->root); free(tree->bbox); free(tree->pidx); free(tree); } /************************************************ Print ************************************************/ void print_tree_double(Node_double *root, int level) { int i; for (i = 0; i < level; i++) { printf(" "); } printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim); if (root->cut_dim != -1) print_tree_double((Node_double *)root->left_child, level + 1); if (root->cut_dim != -1) print_tree_double((Node_double *)root->right_child, level + 1); } /************************************************ Calculate squared cartesian distance between points Params: point1_coord : point 1 point2_coord : point 2 ************************************************/ double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims) { /* Calculate squared distance */ double dist = 0, dim_dist; int8_t i; for (i = 0; i < no_dims; i++) { dim_dist = point2_coord[i] - point1_coord[i]; dist += dim_dist * dim_dist; } return dist; } /************************************************ Get squared distance from point to cube in specified dimension Params: dim : dimension point_coord : cartesian 
coordinates of point bbox : cube ************************************************/ double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox) { double dim_coord = point_coord[dim]; if (dim_coord < bbox[2 * dim]) { /* Left of cube in dimension */ return dim_coord - bbox[2 * dim]; } else if (dim_coord > bbox[2 * dim + 1]) { /* Right of cube in dimension */ return dim_coord - bbox[2 * dim + 1]; } else { /* Inside cube in dimension */ return 0.; } } /************************************************ Get minimum squared distance between point and cube. Params: point_coord : cartesian coordinates of point no_dims : number of dimensions bbox : cube ************************************************/ double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox) { double cube_offset = 0, cube_offset_dim; int8_t i; for (i = 0; i < no_dims; i++) { cube_offset_dim = get_cube_offset_double(i, point_coord, bbox); cube_offset += cube_offset_dim * cube_offset_dim; } return cube_offset; } /************************************************ Search a leaf node for closest point Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist) { double cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Get distance to query point */ cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { 
insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search a leaf node for closest point with data point mask Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint8_t *mask, uint32_t *restrict closest_idx, double *restrict closest_dist) { double cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Is this point masked out? */ if (mask[pidx[start_idx + i]]) { continue; } /* Get distance to query point */ cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search subtree for nearest to query point Params: root : root node of subtree pa : data points pidx : permutation index of data points no_dims : number of dimensions point_coord : query point min_dist : minumum distance to nearest neighbour mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord, double min_dist, uint32_t k, double 
distance_upper_bound, double eps_fac, uint8_t *mask, uint32_t *closest_idx, double *closest_dist) { int8_t dim; double dist_left, dist_right; double new_offset; double box_diff; /* Skip if distance bound exeeded */ if (min_dist > distance_upper_bound) { return; } dim = root->cut_dim; /* Handle leaf node */ if (dim == -1) { if (mask) { search_leaf_double_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist); } else { search_leaf_double(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist); } return; } /* Get distance to cutting plane */ new_offset = point_coord[dim] - root->cut_val; if (new_offset < 0) { /* Left of cutting plane */ dist_left = min_dist; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Right of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = root->cut_bounds_lv - point_coord[dim]; if (box_diff < 0) { box_diff = 0; } dist_right = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } else { /* Right of cutting plane */ dist_right = min_dist; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Left of cutting plane. Update minimum distance. 
See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_double(Tree_double *tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists) { double min_dist; double eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; double *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; #if defined(_MSC_VER) && defined(_OPENMP) int32_t i = 0; int32_t local_num_points = (int32_t) num_points; #else uint32_t i; uint32_t local_num_points = num_points; #endif Node_double *root = (Node_double *)tree->root; /* Queries are OpenMP enabled */ #pragma omp parallel { /* The low chunk size is important to avoid L2 cache trashing for spatial coherent query datasets */ #pragma omp for private(i, j) schedule(static, 100) nowait for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = 
get_min_dist_double(point_coords + no_dims * i, no_dims, bbox); search_splitnode_double(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } } }
/* * This kd-tree implementation is based on the scipy.spatial.cKDTree by Anne * M. Archibald and libANN by David M. Mount and Sunil Arya. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <float.h> #define PA(i,d) (pa[no_dims * pidx[i] + d]) #define PASWAP(a,b) { uint32_t tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; } #ifdef _MSC_VER #define restrict __restrict #endif typedef struct { float cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; float cut_bounds_lv; float cut_bounds_hv; struct Node_float *left_child; struct Node_float *right_child; } Node_float; typedef struct { float *bbox; int8_t no_dims; uint32_t *pidx; struct Node_float *root; } Tree_float; typedef struct { double cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; double cut_bounds_lv; double cut_bounds_hv; struct Node_double *left_child; struct Node_double *right_child; } Node_double; typedef struct { double *bbox; int8_t no_dims; uint32_t *pidx; struct Node_double *root; } Tree_double; void insert_point_float(uint32_t * closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k); void get_bounding_box_float(float *pa, uint32_t * pidx, int8_t no_dims, uint32_t n, float *bbox); int partition_float(float *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t * cut_dim, float *cut_val, uint32_t * n_lo); Tree_float *construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_float *construct_subtree_float(float *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox); Node_float *create_node_float(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_float(Node_float * root); void delete_tree_float(Tree_float * tree); void print_tree_float(Node_float * root, int level); float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims); float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox); float 
get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox); void search_leaf_float(float *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t * restrict closest_idx, float *restrict closest_dist); void search_leaf_float_mask(float *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t * restrict mask, uint32_t * restrict closest_idx, float *restrict closest_dist); void search_splitnode_float(Node_float * root, float *pa, uint32_t * pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t * mask, uint32_t * closest_idx, float *closest_dist); void search_tree_float(Tree_float * tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t * mask, uint32_t * closest_idxs, float *closest_dists); void insert_point_double(uint32_t * closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k); void get_bounding_box_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t n, double *bbox); int partition_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t * cut_dim, double *cut_val, uint32_t * n_lo); Tree_double *construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_double *construct_subtree_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox); Node_double *create_node_double(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_double(Node_double * root); void delete_tree_double(Tree_double * tree); void print_tree_double(Node_double * root, int level); double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims); double get_cube_offset_double(int8_t dim, double *point_coord, double 
*bbox); double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox); void search_leaf_double(double *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint32_t * restrict closest_idx, double *restrict closest_dist); void search_leaf_double_mask(double *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint8_t * restrict mask, uint32_t * restrict closest_idx, double *restrict closest_dist); void search_splitnode_double(Node_double * root, double *pa, uint32_t * pidx, int8_t no_dims, double *point_coord, double min_dist, uint32_t k, double distance_upper_bound, double eps_fac, uint8_t * mask, uint32_t * closest_idx, double *closest_dist); void search_tree_double(Tree_double * tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t * mask, uint32_t * closest_idxs, double *closest_dists); /************************************************ Insert point into priority queue Params: closest_idx : index queue closest_dist : distance queue pidx : permutation index of data points cur_dist : distance to point inserted k : number of neighbours ************************************************/ void insert_point_float(uint32_t * closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k) { int i; for (i = k - 1; i > 0; i--) { if (closest_dist[i - 1] > cur_dist) { closest_dist[i] = closest_dist[i - 1]; closest_idx[i] = closest_idx[i - 1]; } else { break; } } closest_idx[i] = pidx; closest_dist[i] = cur_dist; } /************************************************ Get the bounding box of a set of points Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions n : number of points bbox : bounding box (return) ************************************************/ void get_bounding_box_float(float *pa, 
uint32_t * pidx, int8_t no_dims, uint32_t n, float *bbox) { float cur; int8_t bbox_idx, i, j; uint32_t i2; /* Use first data point to initialize */ for (i = 0; i < no_dims; i++) { bbox[2 * i] = bbox[2 * i + 1] = PA(0, i); } /* Update using rest of data points */ for (i2 = 1; i2 < n; i2++) { for (j = 0; j < no_dims; j++) { bbox_idx = 2 * j; cur = PA(i2, j); if (cur < bbox[bbox_idx]) { bbox[bbox_idx] = cur; } else if (cur > bbox[bbox_idx + 1]) { bbox[bbox_idx + 1] = cur; } } } } /************************************************ Partition a range of data points by manipulation the permutation index. The sliding midpoint rule is used for the partitioning. Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bbox : bounding box of data points cut_dim : dimension used for partition (return) cut_val : value of cutting point (return) n_lo : number of point below cutting plane (return) ************************************************/ int partition_float(float *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t * cut_dim, float *cut_val, uint32_t * n_lo) { int8_t dim = 0, i; uint32_t p, q, i2; float size = 0, min_val, max_val, split, side_len, cur_val; uint32_t end_idx = start_idx + n - 1; /* Find largest bounding box side */ for (i = 0; i < no_dims; i++) { side_len = bbox[2 * i + 1] - bbox[2 * i]; if (side_len > size) { dim = i; size = side_len; } } min_val = bbox[2 * dim]; max_val = bbox[2 * dim + 1]; /* Check for zero length or inconsistent */ if (min_val >= max_val) return 1; /* Use middle for splitting */ split = (min_val + max_val) / 2; /* Partition all data points around middle */ p = start_idx; q = end_idx; while (p <= q) { if (PA(p, dim) < split) { p++; } else if (PA(q, dim) >= split) { /* Guard for underflow */ if (q > 0) { q--; } else { break; } } else { PASWAP(p, q); p++; q--; } } /* Check for empty splits */ if 
(p == start_idx) { /* * No points less than split. Split at lowest point instead. Minimum * 1 point will be in lower box. */ uint32_t j = start_idx; split = PA(j, dim); for (i2 = start_idx + 1; i2 <= end_idx; i2++) { /* Find lowest point */ cur_val = PA(i2, dim); if (cur_val < split) { j = i2; split = cur_val; } } PASWAP(j, start_idx); p = start_idx + 1; } else if (p == end_idx + 1) { /* * No points greater than split. Split at highest point instead. * Minimum 1 point will be in higher box. */ uint32_t j = end_idx; split = PA(j, dim); for (i2 = start_idx; i2 < end_idx; i2++) { /* Find highest point */ cur_val = PA(i2, dim); if (cur_val > split) { j = i2; split = cur_val; } } PASWAP(j, end_idx); p = end_idx; } /* Set return values */ *cut_dim = dim; *cut_val = split; *n_lo = p - start_idx; return 0; } /************************************************ Construct a sub tree over a range of data points. Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bsp : number of points per leaf bbox : bounding box of set of data points ************************************************/ Node_float * construct_subtree_float(float *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox) { /* Create new node */ int is_leaf = (n <= bsp); Node_float *root = create_node_float(start_idx, n, is_leaf); int rval; int8_t cut_dim; uint32_t n_lo; float cut_val, lv, hv; if (is_leaf) { /* Make leaf node */ root->cut_dim = -1; } else { /* Make split node */ /* Partition data set and set node info */ rval = partition_float(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo); if (rval == 1) { root->cut_dim = -1; return root; } root->cut_val = cut_val; root->cut_dim = cut_dim; /* Recurse on both subsets */ lv = bbox[2 * cut_dim]; hv = bbox[2 * cut_dim + 1]; /* Set bounds for cut dimension */ root->cut_bounds_lv = lv; 
root->cut_bounds_hv = hv; /* Update bounding box before call to lower subset and restore after */ bbox[2 * cut_dim + 1] = cut_val; root->left_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox); bbox[2 * cut_dim + 1] = hv; /* Update bounding box before call to higher subset and restore after */ bbox[2 * cut_dim] = cut_val; root->right_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox); bbox[2 * cut_dim] = lv; } return root; } /************************************************ Construct a tree over data points. Params: pa : data points no_dims: number of dimensions n : number of data points bsp : number of points per leaf ************************************************/ Tree_float * construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp) { Tree_float *tree = (Tree_float *) malloc(sizeof(Tree_float)); uint32_t i; uint32_t *pidx; float *bbox; tree->no_dims = no_dims; /* Initialize permutation array */ pidx = (uint32_t *) malloc(sizeof(uint32_t) * n); for (i = 0; i < n; i++) { pidx[i] = i; } bbox = (float *)malloc(2 * sizeof(float) * no_dims); get_bounding_box_float(pa, pidx, no_dims, n, bbox); tree->bbox = bbox; /* Construct subtree on full dataset */ tree->root = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, 0, n, bsp, bbox); tree->pidx = pidx; return tree; } /************************************************ Create a tree node. Params: start_idx : index of first data point to use n : number of data points ************************************************/ Node_float * create_node_float(uint32_t start_idx, uint32_t n, int is_leaf) { Node_float *new_node; if (is_leaf) { /* * Allocate only the part of the struct that will be used in a leaf * node. This relies on the C99 specification of struct layout * conservation and padding and that dereferencing is never attempted * for the node pointers in a leaf. 
*/ new_node = (Node_float *) malloc(sizeof(Node_float) - 2 * sizeof(Node_float *)); } else { new_node = (Node_float *) malloc(sizeof(Node_float)); } new_node->n = n; new_node->start_idx = start_idx; return new_node; } /************************************************ Delete subtree Params: root : root node of subtree to delete ************************************************/ void delete_subtree_float(Node_float * root) { if (root->cut_dim != -1) { delete_subtree_float((Node_float *) root->left_child); delete_subtree_float((Node_float *) root->right_child); } free(root); } /************************************************ Delete tree Params: tree : Tree struct of kd tree ************************************************/ void delete_tree_float(Tree_float * tree) { delete_subtree_float((Node_float *) tree->root); free(tree->bbox); free(tree->pidx); free(tree); } /************************************************ Print ************************************************/ void print_tree_float(Node_float * root, int level) { int i; for (i = 0; i < level; i++) { printf(" "); } printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim); if (root->cut_dim != -1) print_tree_float((Node_float *) root->left_child, level + 1); if (root->cut_dim != -1) print_tree_float((Node_float *) root->right_child, level + 1); } /************************************************ Calculate squared cartesian distance between points Params: point1_coord : point 1 point2_coord : point 2 ************************************************/ float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims) { /* Calculate squared distance */ float dist = 0, dim_dist; int8_t i; for (i = 0; i < no_dims; i++) { dim_dist = point2_coord[i] - point1_coord[i]; dist += dim_dist * dim_dist; } return dist; } /************************************************ Get squared distance from point to cube in specified dimension Params: dim : dimension point_coord : cartesian coordinates of point bbox 
: cube ************************************************/
float
get_cube_offset_float(int8_t dim, float *point_coord, float *bbox)
{
    /* Signed offset of the point from the box along one dimension
       (negative = left of box, positive = right, 0 = inside). */
    float dim_coord = point_coord[dim];

    if (dim_coord < bbox[2 * dim]) {
        /* Left of cube in dimension */
        return dim_coord - bbox[2 * dim];
    } else if (dim_coord > bbox[2 * dim + 1]) {
        /* Right of cube in dimension */
        return dim_coord - bbox[2 * dim + 1];
    } else {
        /* Inside cube in dimension */
        return 0.;
    }
}

/************************************************
Get minimum squared distance between point and cube.
Params:
point_coord : cartesian coordinates of point
no_dims : number of dimensions
bbox : cube
************************************************/
float
get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox)
{
    /* Sum of squared per-dimension offsets; 0 if the point is inside. */
    float cube_offset = 0, cube_offset_dim;
    int8_t i;

    for (i = 0; i < no_dims; i++) {
        cube_offset_dim = get_cube_offset_float(i, point_coord, bbox);
        cube_offset += cube_offset_dim * cube_offset_dim;
    }
    return cube_offset;
}

/************************************************
Search a leaf node for closest point
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_leaf_float(float *restrict pa, uint32_t * restrict pidx,
                  int8_t no_dims, uint32_t start_idx, uint32_t n,
                  float *restrict point_coord, uint32_t k,
                  uint32_t * restrict closest_idx,
                  float *restrict closest_dist)
{
    float cur_dist;
    uint32_t i;

    /* Loop through all points in leaf */
    for (i = 0; i < n; i++) {
        /* Get distance to query point */
        cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord,
                                   no_dims);
        /* Update closest info if new point is closest so far.
           closest_dist[k - 1] is the current k'th (worst) distance. */
        if (cur_dist < closest_dist[k - 1]) {
            insert_point_float(closest_idx, closest_dist,
                               pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search a leaf node for closest point with data point mask
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_leaf_float_mask(float *restrict pa, uint32_t * restrict pidx,
                       int8_t no_dims, uint32_t start_idx, uint32_t n,
                       float *restrict point_coord, uint32_t k,
                       uint8_t * mask,
                       uint32_t * restrict closest_idx,
                       float *restrict closest_dist)
{
    float cur_dist;
    uint32_t i;

    /* Loop through all points in leaf */
    for (i = 0; i < n; i++) {
        /* Is this point masked out? */
        if (mask[pidx[start_idx + i]]) {
            continue;
        }
        /* Get distance to query point */
        cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord,
                                   no_dims);
        /* Update closest info if new point is closest so far */
        if (cur_dist < closest_dist[k - 1]) {
            insert_point_float(closest_idx, closest_dist,
                               pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search subtree for nearest to query point
Params:
root : root node of subtree
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
point_coord : query point
min_dist : minimum distance to nearest neighbour
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_splitnode_float(Node_float * root, float *pa, uint32_t * pidx,
                       int8_t no_dims, float *point_coord, float min_dist,
                       uint32_t k, float distance_upper_bound, float eps_fac,
                       uint8_t * mask, uint32_t * closest_idx,
                       float *closest_dist)
{
    int8_t dim;
    float dist_left, dist_right;
    float new_offset;
    float box_diff;

    /* Skip if distance bound exceeded */
    if (min_dist > distance_upper_bound) {
        return;
    }
    dim = root->cut_dim;

    /* Handle leaf node (cut_dim == -1 marks a leaf) */
    if (dim == -1) {
        if (mask) {
            search_leaf_float_mask(pa, pidx, no_dims, root->start_idx,
                                   root->n, point_coord, k, mask,
                                   closest_idx, closest_dist);
        } else {
            search_leaf_float(pa, pidx, no_dims, root->start_idx, root->n,
                              point_coord, k, closest_idx, closest_dist);
        }
        return;
    }

    /* Get distance to cutting plane */
    new_offset = point_coord[dim] - root->cut_val;

    if (new_offset < 0) {
        /* Left of cutting plane */
        dist_left = min_dist;
        if (dist_left < closest_dist[k - 1] * eps_fac) {
            /* Search left subtree if minimum distance is below limit */
            search_splitnode_float((Node_float *) root->left_child, pa, pidx,
                                   no_dims, point_coord, dist_left, k,
                                   distance_upper_bound, eps_fac, mask,
                                   closest_idx, closest_dist);
        }

        /*
         * Right of cutting plane. Update minimum distance. See Algorithms
         * for Fast Vector Quantization Sunil Arya and David M. Mount.
         */
        box_diff = root->cut_bounds_lv - point_coord[dim];
        if (box_diff < 0) {
            box_diff = 0;
        }
        dist_right = min_dist - box_diff * box_diff + new_offset * new_offset;
        if (dist_right < closest_dist[k - 1] * eps_fac) {
            /* Search right subtree if minimum distance is below limit */
            search_splitnode_float((Node_float *) root->right_child, pa,
                                   pidx, no_dims, point_coord, dist_right, k,
                                   distance_upper_bound, eps_fac, mask,
                                   closest_idx, closest_dist);
        }
    } else {
        /* Right of cutting plane */
        dist_right = min_dist;
        if (dist_right < closest_dist[k - 1] * eps_fac) {
            /* Search right subtree if minimum distance is below limit */
            search_splitnode_float((Node_float *) root->right_child, pa,
                                   pidx, no_dims, point_coord, dist_right, k,
                                   distance_upper_bound, eps_fac, mask,
                                   closest_idx, closest_dist);
        }

        /*
         * Left of cutting plane. Update minimum distance. See Algorithms for
         * Fast Vector Quantization Sunil Arya and David M. Mount.
*/ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_float((Node_float *) root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_float(Tree_float * tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t * mask, uint32_t * closest_idxs, float *closest_dists) { float min_dist; float eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; float *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; Node_float *root = (Node_float *) tree->root; /* Queries are OpenMP enabled */ /* * The low chunk size is important to avoid L2 cache trashing for spatial * coherent query datasets */ for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = get_min_dist_float(point_coords + no_dims * i, no_dims, bbox); search_splitnode_float(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } } /************************************************ Insert point into priority queue Params: closest_idx : index 
queue
closest_dist : distance queue
pidx : permutation index of data points
cur_dist : distance to point inserted
k : number of neighbours
************************************************/
void
insert_point_double(uint32_t * closest_idx, double *closest_dist,
                    uint32_t pidx, double cur_dist, uint32_t k)
{
    int i;

    /* Insertion sort step: shift worse entries down so the queue stays
       sorted ascending by distance, then place the new point. */
    for (i = k - 1; i > 0; i--) {
        if (closest_dist[i - 1] > cur_dist) {
            closest_dist[i] = closest_dist[i - 1];
            closest_idx[i] = closest_idx[i - 1];
        } else {
            break;
        }
    }
    closest_idx[i] = pidx;
    closest_dist[i] = cur_dist;
}

/************************************************
Get the bounding box of a set of points
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
n : number of points
bbox : bounding box (return)
************************************************/
void
get_bounding_box_double(double *pa, uint32_t * pidx, int8_t no_dims,
                        uint32_t n, double *bbox)
{
    /* bbox layout: [min_0, max_0, min_1, max_1, ...] per dimension. */
    double cur;
    int8_t bbox_idx, i, j;
    uint32_t i2;

    /* Use first data point to initialize */
    for (i = 0; i < no_dims; i++) {
        bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
    }

    /* Update using rest of data points */
    for (i2 = 1; i2 < n; i2++) {
        for (j = 0; j < no_dims; j++) {
            bbox_idx = 2 * j;
            cur = PA(i2, j);
            if (cur < bbox[bbox_idx]) {
                bbox[bbox_idx] = cur;
            } else if (cur > bbox[bbox_idx + 1]) {
                bbox[bbox_idx + 1] = cur;
            }
        }
    }
}

/************************************************
Partition a range of data points by manipulating the permutation index.
The sliding midpoint rule is used for the partitioning.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bbox : bounding box of data points
cut_dim : dimension used for partition (return)
cut_val : value of cutting point (return)
n_lo : number of point below cutting plane (return)
Returns 0 on success, 1 if the range cannot be split.
************************************************/
int
partition_double(double *pa, uint32_t * pidx, int8_t no_dims,
                 uint32_t start_idx, uint32_t n, double *bbox,
                 int8_t * cut_dim, double *cut_val, uint32_t * n_lo)
{
    int8_t dim = 0, i;
    uint32_t p, q, i2;
    double size = 0, min_val, max_val, split, side_len, cur_val;
    uint32_t end_idx = start_idx + n - 1;

    /* Find largest bounding box side */
    for (i = 0; i < no_dims; i++) {
        side_len = bbox[2 * i + 1] - bbox[2 * i];
        if (side_len > size) {
            dim = i;
            size = side_len;
        }
    }
    min_val = bbox[2 * dim];
    max_val = bbox[2 * dim + 1];

    /* Check for zero length or inconsistent */
    if (min_val >= max_val)
        return 1;

    /* Use middle for splitting */
    split = (min_val + max_val) / 2;

    /* Partition all data points around middle (Hoare-style two-pointer
       sweep over the permutation array; pa itself is never reordered). */
    p = start_idx;
    q = end_idx;
    while (p <= q) {
        if (PA(p, dim) < split) {
            p++;
        } else if (PA(q, dim) >= split) {
            /* Guard for underflow */
            if (q > 0) {
                q--;
            } else {
                break;
            }
        } else {
            PASWAP(p, q);
            p++;
            q--;
        }
    }

    /* Check for empty splits ("sliding" part of the sliding midpoint rule) */
    if (p == start_idx) {
        /*
         * No points less than split. Split at lowest point instead. Minimum
         * 1 point will be in lower box.
         */
        uint32_t j = start_idx;
        split = PA(j, dim);
        for (i2 = start_idx + 1; i2 <= end_idx; i2++) {
            /* Find lowest point */
            cur_val = PA(i2, dim);
            if (cur_val < split) {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, start_idx);
        p = start_idx + 1;
    } else if (p == end_idx + 1) {
        /*
         * No points greater than split. Split at highest point instead.
         * Minimum 1 point will be in higher box.
         */
        uint32_t j = end_idx;
        split = PA(j, dim);
        for (i2 = start_idx; i2 < end_idx; i2++) {
            /* Find highest point */
            cur_val = PA(i2, dim);
            if (cur_val > split) {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, end_idx);
        p = end_idx;
    }

    /* Set return values */
    *cut_dim = dim;
    *cut_val = split;
    *n_lo = p - start_idx;
    return 0;
}

/************************************************
Construct a sub tree over a range of data points.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bsp : number of points per leaf
bbox : bounding box of set of data points
************************************************/
Node_double *
construct_subtree_double(double *pa, uint32_t * pidx, int8_t no_dims,
                         uint32_t start_idx, uint32_t n, uint32_t bsp,
                         double *bbox)
{
    /* Create new node */
    int is_leaf = (n <= bsp);
    Node_double *root = create_node_double(start_idx, n, is_leaf);
    int rval;
    int8_t cut_dim;
    uint32_t n_lo;
    double cut_val, lv, hv;

    if (is_leaf) {
        /* Make leaf node */
        root->cut_dim = -1;
    } else {
        /* Make split node */
        /* Partition data set and set node info */
        rval = partition_double(pa, pidx, no_dims, start_idx, n, bbox,
                                &cut_dim, &cut_val, &n_lo);
        if (rval == 1) {
            /* Unsplittable range: degrade to a leaf. */
            root->cut_dim = -1;
            return root;
        }
        root->cut_val = cut_val;
        root->cut_dim = cut_dim;

        /* Recurse on both subsets */
        lv = bbox[2 * cut_dim];
        hv = bbox[2 * cut_dim + 1];

        /* Set bounds for cut dimension */
        root->cut_bounds_lv = lv;
        root->cut_bounds_hv = hv;

        /* Update bounding box before call to lower subset and restore after */
        bbox[2 * cut_dim + 1] = cut_val;
        root->left_child =
            (struct Node_double *)construct_subtree_double(pa, pidx, no_dims,
                                                           start_idx, n_lo,
                                                           bsp, bbox);
        bbox[2 * cut_dim + 1] = hv;

        /* Update bounding box before call to higher subset and restore after */
        bbox[2 * cut_dim] = cut_val;
        root->right_child =
            (struct Node_double *)construct_subtree_double(pa, pidx, no_dims,
                                                           start_idx + n_lo,
                                                           n - n_lo, bsp,
                                                           bbox);
bbox[2 * cut_dim] = lv;
    }
    return root;
}

/************************************************
Construct a tree over data points.
Params:
pa : data points
no_dims: number of dimensions
n : number of data points
bsp : number of points per leaf
************************************************/
Tree_double *
construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
    Tree_double *tree = (Tree_double *) malloc(sizeof(Tree_double));
    uint32_t i;
    uint32_t *pidx;
    double *bbox;

    tree->no_dims = no_dims;

    /* Initialize permutation array (identity; partitioning reorders it) */
    pidx = (uint32_t *) malloc(sizeof(uint32_t) * n);
    for (i = 0; i < n; i++) {
        pidx[i] = i;
    }

    bbox = (double *)malloc(2 * sizeof(double) * no_dims);
    get_bounding_box_double(pa, pidx, no_dims, n, bbox);
    tree->bbox = bbox;

    /* Construct subtree on full dataset */
    tree->root = (struct Node_double *)construct_subtree_double(pa, pidx,
                                                                no_dims, 0,
                                                                n, bsp,
                                                                bbox);

    tree->pidx = pidx;
    return tree;
}

/************************************************
Create a tree node.
Params:
start_idx : index of first data point to use
n : number of data points
************************************************/
Node_double *
create_node_double(uint32_t start_idx, uint32_t n, int is_leaf)
{
    Node_double *new_node;
    if (is_leaf) {
        /*
         * Allocate only the part of the struct that will be used in a leaf
         * node. This relies on the C99 specification of struct layout
         * conservation and padding and that dereferencing is never attempted
         * for the node pointers in a leaf.
         */
        new_node = (Node_double *) malloc(sizeof(Node_double) -
                                          2 * sizeof(Node_double *));
    } else {
        new_node = (Node_double *) malloc(sizeof(Node_double));
    }
    new_node->n = n;
    new_node->start_idx = start_idx;
    return new_node;
}

/************************************************
Delete subtree
Params:
root : root node of subtree to delete
************************************************/
void
delete_subtree_double(Node_double * root)
{
    /* cut_dim == -1 marks a leaf; only split nodes own children. */
    if (root->cut_dim != -1) {
        delete_subtree_double((Node_double *) root->left_child);
        delete_subtree_double((Node_double *) root->right_child);
    }
    free(root);
}

/************************************************
Delete tree
Params:
tree : Tree struct of kd tree
************************************************/
void
delete_tree_double(Tree_double * tree)
{
    delete_subtree_double((Node_double *) tree->root);
    free(tree->bbox);
    free(tree->pidx);
    free(tree);
}

/************************************************
Print (debug helper: dumps the tree indented by depth)
************************************************/
void
print_tree_double(Node_double * root, int level)
{
    int i;
    for (i = 0; i < level; i++) {
        printf(" ");
    }
    printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
    if (root->cut_dim != -1)
        print_tree_double((Node_double *) root->left_child, level + 1);
    if (root->cut_dim != -1)
        print_tree_double((Node_double *) root->right_child, level + 1);
}

/************************************************
Calculate squared cartesian distance between points
Params:
point1_coord : point 1
point2_coord : point 2
************************************************/
double
calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims)
{
    /* Calculate squared distance */
    double dist = 0, dim_dist;
    int8_t i;
    for (i = 0; i < no_dims; i++) {
        dim_dist = point2_coord[i] - point1_coord[i];
        dist += dim_dist * dim_dist;
    }
    return dist;
}

/************************************************
Get squared distance from point to cube in specified dimension
Params:
dim : dimension
point_coord : cartesian
coordinates of point
bbox : cube
************************************************/
double
get_cube_offset_double(int8_t dim, double *point_coord, double *bbox)
{
    /* Signed offset of the point from the box along one dimension. */
    double dim_coord = point_coord[dim];

    if (dim_coord < bbox[2 * dim]) {
        /* Left of cube in dimension */
        return dim_coord - bbox[2 * dim];
    } else if (dim_coord > bbox[2 * dim + 1]) {
        /* Right of cube in dimension */
        return dim_coord - bbox[2 * dim + 1];
    } else {
        /* Inside cube in dimension */
        return 0.;
    }
}

/************************************************
Get minimum squared distance between point and cube.
Params:
point_coord : cartesian coordinates of point
no_dims : number of dimensions
bbox : cube
************************************************/
double
get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox)
{
    double cube_offset = 0, cube_offset_dim;
    int8_t i;

    for (i = 0; i < no_dims; i++) {
        cube_offset_dim = get_cube_offset_double(i, point_coord, bbox);
        cube_offset += cube_offset_dim * cube_offset_dim;
    }
    return cube_offset;
}

/************************************************
Search a leaf node for closest point
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_leaf_double(double *restrict pa, uint32_t * restrict pidx,
                   int8_t no_dims, uint32_t start_idx, uint32_t n,
                   double *restrict point_coord, uint32_t k,
                   uint32_t * restrict closest_idx,
                   double *restrict closest_dist)
{
    double cur_dist;
    uint32_t i;

    /* Loop through all points in leaf */
    for (i = 0; i < n; i++) {
        /* Get distance to query point */
        cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord,
                                    no_dims);
        /* Update closest info if new point is closest so far */
        if (cur_dist < closest_dist[k - 1]) {
insert_point_double(closest_idx, closest_dist,
                                pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search a leaf node for closest point with data point mask
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_leaf_double_mask(double *restrict pa, uint32_t * restrict pidx,
                        int8_t no_dims, uint32_t start_idx, uint32_t n,
                        double *restrict point_coord, uint32_t k,
                        uint8_t * mask,
                        uint32_t * restrict closest_idx,
                        double *restrict closest_dist)
{
    double cur_dist;
    uint32_t i;

    /* Loop through all points in leaf */
    for (i = 0; i < n; i++) {
        /* Is this point masked out? */
        if (mask[pidx[start_idx + i]]) {
            continue;
        }
        /* Get distance to query point */
        cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord,
                                    no_dims);
        /* Update closest info if new point is closest so far */
        if (cur_dist < closest_dist[k - 1]) {
            insert_point_double(closest_idx, closest_dist,
                                pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search subtree for nearest to query point
Params:
root : root node of subtree
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
point_coord : query point
min_dist : minimum distance to nearest neighbour
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void
search_splitnode_double(Node_double * root, double *pa, uint32_t * pidx,
                        int8_t no_dims, double *point_coord,
                        double min_dist, uint32_t k, double
distance_upper_bound, double eps_fac,
                        uint8_t * mask, uint32_t * closest_idx,
                        double *closest_dist)
{
    int8_t dim;
    double dist_left, dist_right;
    double new_offset;
    double box_diff;

    /* Skip if distance bound exceeded */
    if (min_dist > distance_upper_bound) {
        return;
    }
    dim = root->cut_dim;

    /* Handle leaf node (cut_dim == -1 marks a leaf) */
    if (dim == -1) {
        if (mask) {
            search_leaf_double_mask(pa, pidx, no_dims, root->start_idx,
                                    root->n, point_coord, k, mask,
                                    closest_idx, closest_dist);
        } else {
            search_leaf_double(pa, pidx, no_dims, root->start_idx, root->n,
                               point_coord, k, closest_idx, closest_dist);
        }
        return;
    }

    /* Get distance to cutting plane */
    new_offset = point_coord[dim] - root->cut_val;

    if (new_offset < 0) {
        /* Left of cutting plane */
        dist_left = min_dist;
        if (dist_left < closest_dist[k - 1] * eps_fac) {
            /* Search left subtree if minimum distance is below limit */
            search_splitnode_double((Node_double *) root->left_child, pa,
                                    pidx, no_dims, point_coord, dist_left, k,
                                    distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
        }

        /*
         * Right of cutting plane. Update minimum distance. See Algorithms
         * for Fast Vector Quantization Sunil Arya and David M. Mount.
         */
        box_diff = root->cut_bounds_lv - point_coord[dim];
        if (box_diff < 0) {
            box_diff = 0;
        }
        dist_right = min_dist - box_diff * box_diff + new_offset * new_offset;
        if (dist_right < closest_dist[k - 1] * eps_fac) {
            /* Search right subtree if minimum distance is below limit */
            search_splitnode_double((Node_double *) root->right_child, pa,
                                    pidx, no_dims, point_coord, dist_right,
                                    k, distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
        }
    } else {
        /* Right of cutting plane */
        dist_right = min_dist;
        if (dist_right < closest_dist[k - 1] * eps_fac) {
            /* Search right subtree if minimum distance is below limit */
            search_splitnode_double((Node_double *) root->right_child, pa,
                                    pidx, no_dims, point_coord, dist_right,
                                    k, distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
        }

        /*
         * Left of cutting plane. Update minimum distance.
See Algorithms for * Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_double((Node_double *) root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_double(Tree_double * tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t * mask, uint32_t * closest_idxs, double *closest_dists) { double min_dist; double eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; double *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; Node_double *root = (Node_double *) tree->root; /* Queries are OpenMP enabled */ /* * The low chunk size is important to avoid L2 cache trashing for spatial * coherent query datasets */ for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = get_min_dist_double(point_coords + no_dims * i, no_dims, bbox); search_splitnode_double(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } }
/*
 * This kd-tree implementation is based on the scipy.spatial.cKDTree by Anne
 * M. Archibald and libANN by David M. Mount and Sunil Arya.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>

/* Access coordinate d of the i'th point in permuted order; relies on
   `pa`, `no_dims` and `pidx` being in scope at the expansion site. */
#define PA(i,d) (pa[no_dims * pidx[i] + d])
/* Swap two entries of the permutation index. */
#define PASWAP(a,b) { uint32_t tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; }

#ifdef _MSC_VER
/* MSVC has no C99 `restrict`; map it to the vendor keyword. */
#define restrict __restrict
#endif

/* kd-tree node (float variant). NOTE(review): `struct Node_float` below is
   an incomplete struct type distinct from this typedef; child pointers are
   always cast back to `Node_float *` before use — confirm against callers. */
typedef struct {
    float cut_val;          /* split value along cut_dim */
    int8_t cut_dim;         /* split dimension, -1 for leaf */
    uint32_t start_idx;     /* first point (in permuted order) */
    uint32_t n;             /* number of points in subtree */
    float cut_bounds_lv;    /* lower bound of cut_dim in this box */
    float cut_bounds_hv;    /* upper bound of cut_dim in this box */
    struct Node_float *left_child;
    struct Node_float *right_child;
} Node_float;

typedef struct {
    float *bbox;            /* [min,max] per dimension */
    int8_t no_dims;
    uint32_t *pidx;         /* permutation index over data points */
    struct Node_float *root;
} Tree_float;

/* kd-tree node (double variant); same layout conventions as Node_float. */
typedef struct {
    double cut_val;
    int8_t cut_dim;
    uint32_t start_idx;
    uint32_t n;
    double cut_bounds_lv;
    double cut_bounds_hv;
    struct Node_double *left_child;
    struct Node_double *right_child;
} Node_double;

typedef struct {
    double *bbox;
    int8_t no_dims;
    uint32_t *pidx;
    struct Node_double *root;
} Tree_double;

/* Forward declarations: float variant. */
void insert_point_float(uint32_t * closest_idx, float *closest_dist,
                        uint32_t pidx, float cur_dist, uint32_t k);
void get_bounding_box_float(float *pa, uint32_t * pidx, int8_t no_dims,
                            uint32_t n, float *bbox);
int partition_float(float *pa, uint32_t * pidx, int8_t no_dims,
                    uint32_t start_idx, uint32_t n, float *bbox,
                    int8_t * cut_dim, float *cut_val, uint32_t * n_lo);
Tree_float *construct_tree_float(float *pa, int8_t no_dims, uint32_t n,
                                 uint32_t bsp);
Node_float *construct_subtree_float(float *pa, uint32_t * pidx,
                                    int8_t no_dims, uint32_t start_idx,
                                    uint32_t n, uint32_t bsp, float *bbox);
Node_float *create_node_float(uint32_t start_idx, uint32_t n, int is_leaf);
void delete_subtree_float(Node_float * root);
void delete_tree_float(Tree_float * tree);
void print_tree_float(Node_float * root, int level);
float calc_dist_float(float *point1_coord, float *point2_coord,
                      int8_t no_dims);
float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox);
float
get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox);
void search_leaf_float(float *restrict pa, uint32_t * restrict pidx,
                       int8_t no_dims, uint32_t start_idx, uint32_t n,
                       float *restrict point_coord, uint32_t k,
                       uint32_t * restrict closest_idx,
                       float *restrict closest_dist);
void search_leaf_float_mask(float *restrict pa, uint32_t * restrict pidx,
                            int8_t no_dims, uint32_t start_idx, uint32_t n,
                            float *restrict point_coord, uint32_t k,
                            uint8_t * restrict mask,
                            uint32_t * restrict closest_idx,
                            float *restrict closest_dist);
void search_splitnode_float(Node_float * root, float *pa, uint32_t * pidx,
                            int8_t no_dims, float *point_coord,
                            float min_dist, uint32_t k,
                            float distance_upper_bound, float eps_fac,
                            uint8_t * mask, uint32_t * closest_idx,
                            float *closest_dist);
void search_tree_float(Tree_float * tree, float *pa, float *point_coords,
                       uint32_t num_points, uint32_t k,
                       float distance_upper_bound, float eps,
                       uint8_t * mask, uint32_t * closest_idxs,
                       float *closest_dists);

/* Forward declarations: double variant. */
void insert_point_double(uint32_t * closest_idx, double *closest_dist,
                         uint32_t pidx, double cur_dist, uint32_t k);
void get_bounding_box_double(double *pa, uint32_t * pidx, int8_t no_dims,
                             uint32_t n, double *bbox);
int partition_double(double *pa, uint32_t * pidx, int8_t no_dims,
                     uint32_t start_idx, uint32_t n, double *bbox,
                     int8_t * cut_dim, double *cut_val, uint32_t * n_lo);
Tree_double *construct_tree_double(double *pa, int8_t no_dims, uint32_t n,
                                   uint32_t bsp);
Node_double *construct_subtree_double(double *pa, uint32_t * pidx,
                                      int8_t no_dims, uint32_t start_idx,
                                      uint32_t n, uint32_t bsp,
                                      double *bbox);
Node_double *create_node_double(uint32_t start_idx, uint32_t n, int is_leaf);
void delete_subtree_double(Node_double * root);
void delete_tree_double(Tree_double * tree);
void print_tree_double(Node_double * root, int level);
double calc_dist_double(double *point1_coord, double *point2_coord,
                        int8_t no_dims);
double get_cube_offset_double(int8_t dim, double *point_coord, double
*bbox);
double get_min_dist_double(double *point_coord, int8_t no_dims,
                           double *bbox);
void search_leaf_double(double *restrict pa, uint32_t * restrict pidx,
                        int8_t no_dims, uint32_t start_idx, uint32_t n,
                        double *restrict point_coord, uint32_t k,
                        uint32_t * restrict closest_idx,
                        double *restrict closest_dist);
void search_leaf_double_mask(double *restrict pa, uint32_t * restrict pidx,
                             int8_t no_dims, uint32_t start_idx, uint32_t n,
                             double *restrict point_coord, uint32_t k,
                             uint8_t * restrict mask,
                             uint32_t * restrict closest_idx,
                             double *restrict closest_dist);
void search_splitnode_double(Node_double * root, double *pa,
                             uint32_t * pidx, int8_t no_dims,
                             double *point_coord, double min_dist,
                             uint32_t k, double distance_upper_bound,
                             double eps_fac, uint8_t * mask,
                             uint32_t * closest_idx, double *closest_dist);
void search_tree_double(Tree_double * tree, double *pa,
                        double *point_coords, uint32_t num_points,
                        uint32_t k, double distance_upper_bound, double eps,
                        uint8_t * mask, uint32_t * closest_idxs,
                        double *closest_dists);

/************************************************
Insert point into priority queue
Params:
closest_idx : index queue
closest_dist : distance queue
pidx : permutation index of data points
cur_dist : distance to point inserted
k : number of neighbours
************************************************/
void
insert_point_float(uint32_t * closest_idx, float *closest_dist,
                   uint32_t pidx, float cur_dist, uint32_t k)
{
    int i;

    /* Insertion sort step: shift worse entries down so the queue stays
       sorted ascending by distance, then place the new point. */
    for (i = k - 1; i > 0; i--) {
        if (closest_dist[i - 1] > cur_dist) {
            closest_dist[i] = closest_dist[i - 1];
            closest_idx[i] = closest_idx[i - 1];
        } else {
            break;
        }
    }
    closest_idx[i] = pidx;
    closest_dist[i] = cur_dist;
}

/************************************************
Get the bounding box of a set of points
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
n : number of points
bbox : bounding box (return)
************************************************/
void
get_bounding_box_float(float *pa,
uint32_t * pidx, int8_t no_dims, uint32_t n, float *bbox)
{
    /* bbox layout: [min_0, max_0, min_1, max_1, ...] per dimension. */
    float cur;
    int8_t bbox_idx, i, j;
    uint32_t i2;

    /* Use first data point to initialize */
    for (i = 0; i < no_dims; i++) {
        bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
    }

    /* Update using rest of data points */
    for (i2 = 1; i2 < n; i2++) {
        for (j = 0; j < no_dims; j++) {
            bbox_idx = 2 * j;
            cur = PA(i2, j);
            if (cur < bbox[bbox_idx]) {
                bbox[bbox_idx] = cur;
            } else if (cur > bbox[bbox_idx + 1]) {
                bbox[bbox_idx + 1] = cur;
            }
        }
    }
}

/************************************************
Partition a range of data points by manipulating the permutation index.
The sliding midpoint rule is used for the partitioning.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bbox : bounding box of data points
cut_dim : dimension used for partition (return)
cut_val : value of cutting point (return)
n_lo : number of point below cutting plane (return)
Returns 0 on success, 1 if the range cannot be split.
************************************************/
int
partition_float(float *pa, uint32_t * pidx, int8_t no_dims,
                uint32_t start_idx, uint32_t n, float *bbox,
                int8_t * cut_dim, float *cut_val, uint32_t * n_lo)
{
    int8_t dim = 0, i;
    uint32_t p, q, i2;
    float size = 0, min_val, max_val, split, side_len, cur_val;
    uint32_t end_idx = start_idx + n - 1;

    /* Find largest bounding box side */
    for (i = 0; i < no_dims; i++) {
        side_len = bbox[2 * i + 1] - bbox[2 * i];
        if (side_len > size) {
            dim = i;
            size = side_len;
        }
    }
    min_val = bbox[2 * dim];
    max_val = bbox[2 * dim + 1];

    /* Check for zero length or inconsistent */
    if (min_val >= max_val)
        return 1;

    /* Use middle for splitting */
    split = (min_val + max_val) / 2;

    /* Partition all data points around middle (Hoare-style two-pointer
       sweep over the permutation array; pa itself is never reordered). */
    p = start_idx;
    q = end_idx;
    while (p <= q) {
        if (PA(p, dim) < split) {
            p++;
        } else if (PA(q, dim) >= split) {
            /* Guard for underflow */
            if (q > 0) {
                q--;
            } else {
                break;
            }
        } else {
            PASWAP(p, q);
            p++;
            q--;
        }
    }

    /* Check for empty splits ("sliding" part of the sliding midpoint rule) */
    if (p == start_idx) {
        /*
         * No points less than split. Split at lowest point instead. Minimum
         * 1 point will be in lower box.
         */
        uint32_t j = start_idx;
        split = PA(j, dim);
        for (i2 = start_idx + 1; i2 <= end_idx; i2++) {
            /* Find lowest point */
            cur_val = PA(i2, dim);
            if (cur_val < split) {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, start_idx);
        p = start_idx + 1;
    } else if (p == end_idx + 1) {
        /*
         * No points greater than split. Split at highest point instead.
         * Minimum 1 point will be in higher box.
         */
        uint32_t j = end_idx;
        split = PA(j, dim);
        for (i2 = start_idx; i2 < end_idx; i2++) {
            /* Find highest point */
            cur_val = PA(i2, dim);
            if (cur_val > split) {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, end_idx);
        p = end_idx;
    }

    /* Set return values */
    *cut_dim = dim;
    *cut_val = split;
    *n_lo = p - start_idx;
    return 0;
}

/************************************************
Construct a sub tree over a range of data points.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bsp : number of points per leaf
bbox : bounding box of set of data points
************************************************/
Node_float *
construct_subtree_float(float *pa, uint32_t * pidx, int8_t no_dims,
                        uint32_t start_idx, uint32_t n, uint32_t bsp,
                        float *bbox)
{
    /* Create new node */
    int is_leaf = (n <= bsp);
    Node_float *root = create_node_float(start_idx, n, is_leaf);
    int rval;
    int8_t cut_dim;
    uint32_t n_lo;
    float cut_val, lv, hv;

    if (is_leaf) {
        /* Make leaf node */
        root->cut_dim = -1;
    } else {
        /* Make split node */
        /* Partition data set and set node info */
        rval = partition_float(pa, pidx, no_dims, start_idx, n, bbox,
                               &cut_dim, &cut_val, &n_lo);
        if (rval == 1) {
            /* Unsplittable range: degrade to a leaf. */
            root->cut_dim = -1;
            return root;
        }
        root->cut_val = cut_val;
        root->cut_dim = cut_dim;

        /* Recurse on both subsets */
        lv = bbox[2 * cut_dim];
        hv = bbox[2 * cut_dim + 1];

        /* Set bounds for cut dimension */
        root->cut_bounds_lv = lv;
root->cut_bounds_hv = hv;

        /* Update bounding box before call to lower subset and restore after */
        bbox[2 * cut_dim + 1] = cut_val;
        root->left_child =
            (struct Node_float *)construct_subtree_float(pa, pidx, no_dims,
                                                         start_idx, n_lo,
                                                         bsp, bbox);
        bbox[2 * cut_dim + 1] = hv;

        /* Update bounding box before call to higher subset and restore after */
        bbox[2 * cut_dim] = cut_val;
        root->right_child =
            (struct Node_float *)construct_subtree_float(pa, pidx, no_dims,
                                                         start_idx + n_lo,
                                                         n - n_lo, bsp,
                                                         bbox);
        bbox[2 * cut_dim] = lv;
    }
    return root;
}

/************************************************
Construct a tree over data points.
Params:
pa : data points
no_dims: number of dimensions
n : number of data points
bsp : number of points per leaf
************************************************/
Tree_float *
construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
    Tree_float *tree = (Tree_float *) malloc(sizeof(Tree_float));
    uint32_t i;
    uint32_t *pidx;
    float *bbox;

    tree->no_dims = no_dims;

    /* Initialize permutation array (identity; partitioning reorders it) */
    pidx = (uint32_t *) malloc(sizeof(uint32_t) * n);
    for (i = 0; i < n; i++) {
        pidx[i] = i;
    }

    bbox = (float *)malloc(2 * sizeof(float) * no_dims);
    get_bounding_box_float(pa, pidx, no_dims, n, bbox);
    tree->bbox = bbox;

    /* Construct subtree on full dataset */
    tree->root = (struct Node_float *)construct_subtree_float(pa, pidx,
                                                              no_dims, 0, n,
                                                              bsp, bbox);

    tree->pidx = pidx;
    return tree;
}

/************************************************
Create a tree node.
Params:
start_idx : index of first data point to use
n : number of data points
************************************************/
Node_float *
create_node_float(uint32_t start_idx, uint32_t n, int is_leaf)
{
    Node_float *new_node;
    if (is_leaf) {
        /*
         * Allocate only the part of the struct that will be used in a leaf
         * node. This relies on the C99 specification of struct layout
         * conservation and padding and that dereferencing is never attempted
         * for the node pointers in a leaf.
         */
        new_node = (Node_float *) malloc(sizeof(Node_float) -
                                         2 * sizeof(Node_float *));
    } else {
        new_node = (Node_float *) malloc(sizeof(Node_float));
    }
    new_node->n = n;
    new_node->start_idx = start_idx;
    return new_node;
}

/************************************************
Delete subtree
Params:
root : root node of subtree to delete
************************************************/
void
delete_subtree_float(Node_float * root)
{
    /* cut_dim == -1 marks a leaf; only split nodes own children. */
    if (root->cut_dim != -1) {
        delete_subtree_float((Node_float *) root->left_child);
        delete_subtree_float((Node_float *) root->right_child);
    }
    free(root);
}

/************************************************
Delete tree
Params:
tree : Tree struct of kd tree
************************************************/
void
delete_tree_float(Tree_float * tree)
{
    delete_subtree_float((Node_float *) tree->root);
    free(tree->bbox);
    free(tree->pidx);
    free(tree);
}

/************************************************
Print (debug helper: dumps the tree indented by depth)
************************************************/
void
print_tree_float(Node_float * root, int level)
{
    int i;
    for (i = 0; i < level; i++) {
        printf(" ");
    }
    printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
    if (root->cut_dim != -1)
        print_tree_float((Node_float *) root->left_child, level + 1);
    if (root->cut_dim != -1)
        print_tree_float((Node_float *) root->right_child, level + 1);
}

/************************************************
Calculate squared cartesian distance between points
Params:
point1_coord : point 1
point2_coord : point 2
************************************************/
float
calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims)
{
    /* Calculate squared distance */
    float dist = 0, dim_dist;
    int8_t i;
    for (i = 0; i < no_dims; i++) {
        dim_dist = point2_coord[i] - point1_coord[i];
        dist += dim_dist * dim_dist;
    }
    return dist;
}

/************************************************
Get squared distance from point to cube in specified dimension
Params:
dim : dimension
point_coord : cartesian coordinates of point
bbox
: cube ************************************************/ float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox) { float dim_coord = point_coord[dim]; if (dim_coord < bbox[2 * dim]) { /* Left of cube in dimension */ return dim_coord - bbox[2 * dim]; } else if (dim_coord > bbox[2 * dim + 1]) { /* Right of cube in dimension */ return dim_coord - bbox[2 * dim + 1]; } else { /* Inside cube in dimension */ return 0.; } } /************************************************ Get minimum squared distance between point and cube. Params: point_coord : cartesian coordinates of point no_dims : number of dimensions bbox : cube ************************************************/ float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox) { float cube_offset = 0, cube_offset_dim; int8_t i; for (i = 0; i < no_dims; i++) { cube_offset_dim = get_cube_offset_float(i, point_coord, bbox); cube_offset += cube_offset_dim * cube_offset_dim; } return cube_offset; } /************************************************ Search a leaf node for closest point Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float(float *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t * restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far */ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx 
+ i], cur_dist, k); } } } /************************************************ Search a leaf node for closest point with data point mask Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float_mask(float *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t * mask, uint32_t * restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Is this point masked out? */ if (mask[pidx[start_idx + i]]) { continue; } /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far */ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search subtree for nearest to query point Params: root : root node of subtree pa : data points pidx : permutation index of data points no_dims : number of dimensions point_coord : query point min_dist : minumum distance to nearest neighbour mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_splitnode_float(Node_float * root, float *pa, uint32_t * pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t * mask, uint32_t * closest_idx, 
float *closest_dist) { int8_t dim; float dist_left, dist_right; float new_offset; float box_diff; /* Skip if distance bound exeeded */ if (min_dist > distance_upper_bound) { return; } dim = root->cut_dim; /* Handle leaf node */ if (dim == -1) { if (mask) { search_leaf_float_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist); } else { search_leaf_float(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist); } return; } /* Get distance to cutting plane */ new_offset = point_coord[dim] - root->cut_val; if (new_offset < 0) { /* Left of cutting plane */ dist_left = min_dist; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_float((Node_float *) root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* * Right of cutting plane. Update minimum distance. See Algorithms * for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = root->cut_bounds_lv - point_coord[dim]; if (box_diff < 0) { box_diff = 0; } dist_right = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit */ search_splitnode_float((Node_float *) root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } else { /* Right of cutting plane */ dist_right = min_dist; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit */ search_splitnode_float((Node_float *) root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* * Left of cutting plane. Update minimum distance. See Algorithms for * Fast Vector Quantization Sunil Arya and David M. Mount. 
*/ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_float((Node_float *) root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_float(Tree_float * tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t * mask, uint32_t * closest_idxs, float *closest_dists) { float min_dist; float eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; float *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; #if defined(_MSC_VER) && defined(_OPENMP) int32_t i = 0; int32_t local_num_points = (int32_t) num_points; #else uint32_t i; uint32_t local_num_points = num_points; #endif Node_float *root = (Node_float *) tree->root; /* Queries are OpenMP enabled */ #pragma omp parallel { /* * The low chunk size is important to avoid L2 cache trashing for * spatial coherent query datasets */ #pragma omp for private(i, j) schedule(static, 100) nowait for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = get_min_dist_float(point_coords + no_dims * i, no_dims, bbox); search_splitnode_float(root, pa, 
pidx, no_dims, point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } } } /************************************************ Insert point into priority queue Params: closest_idx : index queue closest_dist : distance queue pidx : permutation index of data points cur_dist : distance to point inserted k : number of neighbours ************************************************/ void insert_point_double(uint32_t * closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k) { int i; for (i = k - 1; i > 0; i--) { if (closest_dist[i - 1] > cur_dist) { closest_dist[i] = closest_dist[i - 1]; closest_idx[i] = closest_idx[i - 1]; } else { break; } } closest_idx[i] = pidx; closest_dist[i] = cur_dist; } /************************************************ Get the bounding box of a set of points Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions n : number of points bbox : bounding box (return) ************************************************/ void get_bounding_box_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t n, double *bbox) { double cur; int8_t bbox_idx, i, j; uint32_t i2; /* Use first data point to initialize */ for (i = 0; i < no_dims; i++) { bbox[2 * i] = bbox[2 * i + 1] = PA(0, i); } /* Update using rest of data points */ for (i2 = 1; i2 < n; i2++) { for (j = 0; j < no_dims; j++) { bbox_idx = 2 * j; cur = PA(i2, j); if (cur < bbox[bbox_idx]) { bbox[bbox_idx] = cur; } else if (cur > bbox[bbox_idx + 1]) { bbox[bbox_idx + 1] = cur; } } } } /************************************************ Partition a range of data points by manipulation the permutation index. The sliding midpoint rule is used for the partitioning. 
Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bbox : bounding box of data points cut_dim : dimension used for partition (return) cut_val : value of cutting point (return) n_lo : number of point below cutting plane (return) ************************************************/ int partition_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t * cut_dim, double *cut_val, uint32_t * n_lo) { int8_t dim = 0, i; uint32_t p, q, i2; double size = 0, min_val, max_val, split, side_len, cur_val; uint32_t end_idx = start_idx + n - 1; /* Find largest bounding box side */ for (i = 0; i < no_dims; i++) { side_len = bbox[2 * i + 1] - bbox[2 * i]; if (side_len > size) { dim = i; size = side_len; } } min_val = bbox[2 * dim]; max_val = bbox[2 * dim + 1]; /* Check for zero length or inconsistent */ if (min_val >= max_val) return 1; /* Use middle for splitting */ split = (min_val + max_val) / 2; /* Partition all data points around middle */ p = start_idx; q = end_idx; while (p <= q) { if (PA(p, dim) < split) { p++; } else if (PA(q, dim) >= split) { /* Guard for underflow */ if (q > 0) { q--; } else { break; } } else { PASWAP(p, q); p++; q--; } } /* Check for empty splits */ if (p == start_idx) { /* * No points less than split. Split at lowest point instead. Minimum * 1 point will be in lower box. */ uint32_t j = start_idx; split = PA(j, dim); for (i2 = start_idx + 1; i2 <= end_idx; i2++) { /* Find lowest point */ cur_val = PA(i2, dim); if (cur_val < split) { j = i2; split = cur_val; } } PASWAP(j, start_idx); p = start_idx + 1; } else if (p == end_idx + 1) { /* * No points greater than split. Split at highest point instead. * Minimum 1 point will be in higher box. 
*/ uint32_t j = end_idx; split = PA(j, dim); for (i2 = start_idx; i2 < end_idx; i2++) { /* Find highest point */ cur_val = PA(i2, dim); if (cur_val > split) { j = i2; split = cur_val; } } PASWAP(j, end_idx); p = end_idx; } /* Set return values */ *cut_dim = dim; *cut_val = split; *n_lo = p - start_idx; return 0; } /************************************************ Construct a sub tree over a range of data points. Params: pa : data points pidx : permutation index of data points no_dims: number of dimensions start_idx : index of first data point to use n : number of data points bsp : number of points per leaf bbox : bounding box of set of data points ************************************************/ Node_double * construct_subtree_double(double *pa, uint32_t * pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox) { /* Create new node */ int is_leaf = (n <= bsp); Node_double *root = create_node_double(start_idx, n, is_leaf); int rval; int8_t cut_dim; uint32_t n_lo; double cut_val, lv, hv; if (is_leaf) { /* Make leaf node */ root->cut_dim = -1; } else { /* Make split node */ /* Partition data set and set node info */ rval = partition_double(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo); if (rval == 1) { root->cut_dim = -1; return root; } root->cut_val = cut_val; root->cut_dim = cut_dim; /* Recurse on both subsets */ lv = bbox[2 * cut_dim]; hv = bbox[2 * cut_dim + 1]; /* Set bounds for cut dimension */ root->cut_bounds_lv = lv; root->cut_bounds_hv = hv; /* Update bounding box before call to lower subset and restore after */ bbox[2 * cut_dim + 1] = cut_val; root->left_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox); bbox[2 * cut_dim + 1] = hv; /* Update bounding box before call to higher subset and restore after */ bbox[2 * cut_dim] = cut_val; root->right_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox); 
bbox[2 * cut_dim] = lv; } return root; } /************************************************ Construct a tree over data points. Params: pa : data points no_dims: number of dimensions n : number of data points bsp : number of points per leaf ************************************************/ Tree_double * construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp) { Tree_double *tree = (Tree_double *) malloc(sizeof(Tree_double)); uint32_t i; uint32_t *pidx; double *bbox; tree->no_dims = no_dims; /* Initialize permutation array */ pidx = (uint32_t *) malloc(sizeof(uint32_t) * n); for (i = 0; i < n; i++) { pidx[i] = i; } bbox = (double *)malloc(2 * sizeof(double) * no_dims); get_bounding_box_double(pa, pidx, no_dims, n, bbox); tree->bbox = bbox; /* Construct subtree on full dataset */ tree->root = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, 0, n, bsp, bbox); tree->pidx = pidx; return tree; } /************************************************ Create a tree node. Params: start_idx : index of first data point to use n : number of data points ************************************************/ Node_double * create_node_double(uint32_t start_idx, uint32_t n, int is_leaf) { Node_double *new_node; if (is_leaf) { /* * Allocate only the part of the struct that will be used in a leaf * node. This relies on the C99 specification of struct layout * conservation and padding and that dereferencing is never attempted * for the node pointers in a leaf. 
*/ new_node = (Node_double *) malloc(sizeof(Node_double) - 2 * sizeof(Node_double *)); } else { new_node = (Node_double *) malloc(sizeof(Node_double)); } new_node->n = n; new_node->start_idx = start_idx; return new_node; } /************************************************ Delete subtree Params: root : root node of subtree to delete ************************************************/ void delete_subtree_double(Node_double * root) { if (root->cut_dim != -1) { delete_subtree_double((Node_double *) root->left_child); delete_subtree_double((Node_double *) root->right_child); } free(root); } /************************************************ Delete tree Params: tree : Tree struct of kd tree ************************************************/ void delete_tree_double(Tree_double * tree) { delete_subtree_double((Node_double *) tree->root); free(tree->bbox); free(tree->pidx); free(tree); } /************************************************ Print ************************************************/ void print_tree_double(Node_double * root, int level) { int i; for (i = 0; i < level; i++) { printf(" "); } printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim); if (root->cut_dim != -1) print_tree_double((Node_double *) root->left_child, level + 1); if (root->cut_dim != -1) print_tree_double((Node_double *) root->right_child, level + 1); } /************************************************ Calculate squared cartesian distance between points Params: point1_coord : point 1 point2_coord : point 2 ************************************************/ double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims) { /* Calculate squared distance */ double dist = 0, dim_dist; int8_t i; for (i = 0; i < no_dims; i++) { dim_dist = point2_coord[i] - point1_coord[i]; dist += dim_dist * dim_dist; } return dist; } /************************************************ Get squared distance from point to cube in specified dimension Params: dim : dimension point_coord : cartesian 
coordinates of point bbox : cube ************************************************/ double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox) { double dim_coord = point_coord[dim]; if (dim_coord < bbox[2 * dim]) { /* Left of cube in dimension */ return dim_coord - bbox[2 * dim]; } else if (dim_coord > bbox[2 * dim + 1]) { /* Right of cube in dimension */ return dim_coord - bbox[2 * dim + 1]; } else { /* Inside cube in dimension */ return 0.; } } /************************************************ Get minimum squared distance between point and cube. Params: point_coord : cartesian coordinates of point no_dims : number of dimensions bbox : cube ************************************************/ double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox) { double cube_offset = 0, cube_offset_dim; int8_t i; for (i = 0; i < no_dims; i++) { cube_offset_dim = get_cube_offset_double(i, point_coord, bbox); cube_offset += cube_offset_dim * cube_offset_dim; } return cube_offset; } /************************************************ Search a leaf node for closest point Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_double(double *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint32_t * restrict closest_idx, double *restrict closest_dist) { double cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Get distance to query point */ cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far */ if (cur_dist < closest_dist[k - 1]) { 
insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search a leaf node for closest point with data point mask Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_double_mask(double *restrict pa, uint32_t * restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint8_t * mask, uint32_t * restrict closest_idx, double *restrict closest_dist) { double cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Is this point masked out? */ if (mask[pidx[start_idx + i]]) { continue; } /* Get distance to query point */ cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far */ if (cur_dist < closest_dist[k - 1]) { insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search subtree for nearest to query point Params: root : root node of subtree pa : data points pidx : permutation index of data points no_dims : number of dimensions point_coord : query point min_dist : minumum distance to nearest neighbour mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_splitnode_double(Node_double * root, double *pa, uint32_t * pidx, int8_t no_dims, double *point_coord, double min_dist, uint32_t k, double 
distance_upper_bound, double eps_fac, uint8_t *mask,
                        uint32_t *closest_idx, double *closest_dist)
{
    int8_t dim;
    double dist_left, dist_right;
    double new_offset;
    double box_diff;

    /* Prune: this subtree cannot contain anything within the bound. */
    if (min_dist > distance_upper_bound)
        return;

    dim = root->cut_dim;
    /* Leaf node: scan its points directly. */
    if (dim == -1)
    {
        if (mask)
            search_leaf_double_mask(pa, pidx, no_dims, root->start_idx,
                                    root->n, point_coord, k, mask,
                                    closest_idx, closest_dist);
        else
            search_leaf_double(pa, pidx, no_dims, root->start_idx, root->n,
                               point_coord, k, closest_idx, closest_dist);
        return;
    }

    /* Signed distance from the query point to the cutting plane. */
    new_offset = point_coord[dim] - root->cut_val;
    if (new_offset < 0)
    {
        /* Query lies left of the cutting plane: visit near child first. */
        dist_left = min_dist;
        if (dist_left < closest_dist[k - 1] * eps_fac)
            search_splitnode_double((Node_double *) root->left_child, pa,
                                    pidx, no_dims, point_coord, dist_left, k,
                                    distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
        /*
         * Far (right) child: tighten the lower bound.  See "Algorithms for
         * Fast Vector Quantization", Sunil Arya and David M. Mount.
         */
        box_diff = root->cut_bounds_lv - point_coord[dim];
        if (box_diff < 0)
            box_diff = 0;
        dist_right = min_dist - box_diff * box_diff + new_offset * new_offset;
        if (dist_right < closest_dist[k - 1] * eps_fac)
            search_splitnode_double((Node_double *) root->right_child, pa,
                                    pidx, no_dims, point_coord, dist_right, k,
                                    distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
    }
    else
    {
        /* Query lies right of the cutting plane: visit near child first. */
        dist_right = min_dist;
        if (dist_right < closest_dist[k - 1] * eps_fac)
            search_splitnode_double((Node_double *) root->right_child, pa,
                                    pidx, no_dims, point_coord, dist_right, k,
                                    distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
        /* Far (left) child: tighten the lower bound as above. */
        box_diff = point_coord[dim] - root->cut_bounds_hv;
        if (box_diff < 0)
            box_diff = 0;
        dist_left = min_dist - box_diff * box_diff + new_offset * new_offset;
        if (dist_left < closest_dist[k - 1] * eps_fac)
            search_splitnode_double((Node_double *) root->left_child, pa,
                                    pidx, no_dims, point_coord, dist_left, k,
                                    distance_upper_bound, eps_fac, mask,
                                    closest_idx, closest_dist);
    }
}

/************************************************
Search for nearest neighbour for a set of query points

Params:
    tree          : Tree struct of kd tree
    pa            : data points
    point_coords  : query points
    num_points    : number of query points
    k             : number of neighbours
    mask          : boolean array of invalid (True) and valid (False) points
    closest_idxs  : index of closest data point found (return)
    closest_dists : distance to closest point (return)
************************************************/
void
search_tree_double(Tree_double *tree, double *pa, double *point_coords,
                   uint32_t num_points, uint32_t k,
                   double distance_upper_bound, double eps, uint8_t *mask,
                   uint32_t *closest_idxs, double *closest_dists)
{
    double eps_fac = 1 / ((1 + eps) * (1 + eps));
    int8_t no_dims = tree->no_dims;
    double *bbox = tree->bbox;
    uint32_t *pidx = tree->pidx;
    uint32_t j = 0;
#if defined(_MSC_VER) && defined(_OPENMP)
    /* MSVC implements OpenMP 2.0 only: loop variable must be signed. */
    int32_t i = 0;
    int32_t local_num_points = (int32_t) num_points;
#else
    uint32_t i;
    uint32_t local_num_points = num_points;
#endif
    Node_double *root = (Node_double *) tree->root;

    /* Queries are OpenMP enabled */
#pragma omp parallel
    {
        /*
         * The low chunk size is important to avoid L2 cache trashing for
         * spatial coherent query datasets
         */
#pragma omp for private(i, j) schedule(static, 100) nowait
        for (i = 0; i < local_num_points; i++)
        {
            /*
             * BUGFIX: min_dist must be thread-private.  It used to be a
             * shared function-scope variable written by every thread,
             * which is a data race under OpenMP.
             */
            double min_dist;
            for (j = 0; j < k; j++)
            {
                closest_idxs[i * k + j] = UINT32_MAX;
                closest_dists[i * k + j] = DBL_MAX;
            }
            min_dist = get_min_dist_double(point_coords + no_dims * i,
                                           no_dims, bbox);
            search_splitnode_double(root, pa, pidx, no_dims,
                                    point_coords + no_dims * i, min_dist, k,
                                    distance_upper_bound, eps_fac, mask,
                                    &closest_idxs[i * k],
                                    &closest_dists[i * k]);
        }
    }
}
/* ===== attribute.c (start of next concatenated source file) ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundFactor(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; double factor; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); factor=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) factor++; p+=GetPixelChannels(edge_image); } } factor/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(factor); } static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge) { double factor; factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(factor); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_factor, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  /*
    Work on a page-normalized clone so the caller's image is untouched.
  */
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  memset(&vertex,0,sizeof(vertex));
  /*
    Compute the initial background factor of each of the four edges.
  */
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  /*
    Convert the "trim:percent-background" artifact (a percentage) into the
    threshold the edge factors are compared against; clamped to
    [MagickEpsilon,1.0].
  */
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  /*
    Repeatedly shave the edge whose factor equals the current minimum until
    every edge reaches the threshold; vertex accumulates how many rows and
    columns were removed from each side.
  */
  background_factor=GetMinEdgeBackgroundFactor(&edge);
  for ( ; background_factor < percent_background;
          background_factor=GetMinEdgeBackgroundFactor(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_factor) < MagickEpsilon)
      {
        /*
          Trim left edge: recompute the factors the trim invalidates.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_factor) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_factor) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_factor) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  /*
    The number of columns/rows shaved from the left/top becomes the offset of
    the bounding box.
  */
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}

MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Defer to the edge-trimming variant when the "trim:percent-background"
    artifact is set.
  */
  artifact=GetImageArtifact(image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    return(GetEdgeBoundingBox(image,exception));
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Sample the top-left, top-right, and bottom-left corner colors; pixels
    that fuzzily match them are ignored when growing the bounding box.
  */
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,p,&target[0]);
  GetPixelInfo(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[1]);
  GetPixelInfo(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[2]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Snapshot the shared bounds into a per-row copy under the critical
      section; each row then widens its private copy.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      /*
        Merge this row's extrema back into the shared bounding box.
      */
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        width/height currently hold the maximum x/y extrema; convert them
        into extents relative to the x/y offset.
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.  One depth accumulator is kept per thread slot and
    the maximum is taken at the end.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        PseudoClass without alpha: the colormap alone determines the depth.
        For each colormap entry, raise the depth until the entry survives a
        round trip at that depth.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      /*
        Reduce the per-thread depths to their maximum.
      */
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): depth_map[v] is
        the smallest depth at which quantum value v survives a
        scale-down/scale-up round trip.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
          }
          p+=GetPixelChannels(image);
        }
        /*
          Once a thread reaches full quantum depth no deeper answer exists;
          flag the other threads to stop early.
        */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Raise this thread's depth until the sample survives a
          scale-down/scale-up round trip.
        */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e
t I m a g e Q u a n t u m D e p t h                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the next legal quantum depth (8, 16, 32, or
    64); depths above 64 are returned unchanged.  When constrain is set the
    result is capped at MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[4] = { 8, 16, 32, 64 };

  size_t
    j,
    quantum_depth;

  quantum_depth=image->depth;
  for (j=0; j < (sizeof(legal_depths)/sizeof(legal_depths[0])); j++)
    if (quantum_depth <= legal_depths[j])
      {
        quantum_depth=legal_depths[j];
        break;
      }
  if (constrain != MagickFalse)
    quantum_depth=(size_t) MagickMin((double) quantum_depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(quantum_depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  /*
    Classify the image from its colorspace, its type predicates, and its
    alpha trait.  CMYK always maps to a color-separation type; otherwise the
    checks run from most to least restrictive: monochrome, gray, palette,
    truecolor.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->colorspace == CMYKColorspace)
    return(image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(image->alpha_trait != UndefinedPixelTrait ? GrayscaleAlphaType :
      GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(image->alpha_trait != UndefinedPixelTrait ? PaletteAlphaType :
      PaletteType);
  return(image->alpha_trait != UndefinedPixelTrait ? TrueColorAlphaType :
    TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity
%  is either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust an already-classified gray type; a non-sRGB-compatible colorspace
    is never reported as gray.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Assume bi-level until a non-monochrome pixel downgrades the type to
    grayscale, or a non-gray pixel yields undefined and stops the scan.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register ssize_t
    x;

  register const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust an already-classified bi-level type; non-sRGB-compatible
    colorspaces are never monochrome.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Scan until the first non-monochrome pixel disproves bi-level.
  */
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Like GetImageType(), but inspects the pixels rather than trusting the
    image's current type tag: CMYK first, then the Identify* predicates from
    most to least restrictive, with the alpha trait choosing between the
    plain and alpha variants of each type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(image->alpha_trait != UndefinedPixelTrait ? GrayscaleAlphaType :
      GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(image->alpha_trait != UndefinedPixelTrait ? PaletteAlphaType :
      PaletteType);
  return(image->alpha_trait != UndefinedPixelTrait ? TrueColorAlphaType :
    TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    Trusts the image's current type tag only; no pixels are inspected.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    Trusts the image's current type tag only; no pixels are inspected.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Will return MagickTrue immediately if the alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque: scan until the first non-opaque alpha
    sample is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /*
    The row loop only exits early (y < rows) when a non-opaque pixel or a
    cache failure was encountered.
  */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A depth at or above the quantum depth requires no pixel rewrite.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Quantize each updatable colormap channel to the requested depth via
        a scale-down/scale-up round trip.

        NOTE(review): status is first assigned only after this loop, yet the
        shared(status) clause below references it; harmless since the loop
        body never reads or writes status, but worth confirming upstream.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        round-tripped value for every possible quantum so the pixel loop is
        a single table lookup.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel        Grayscale       GrayscaleMatte
%        Palette        PaletteMatte    TrueColor
%        TrueColorMatte ColorSeparation ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  /*
    Propagate the image's dither setting (and any "dither" artifact) into
    the ImageInfo handed to the quantizer.
  */
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /*
        Normalize to gray, then quantize down to a 2-color gray palette.
      */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      /*
        Quantize only when the image is not already a <=256-color palette.
      */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      /*
        Threshold the alpha channel to on/off, then quantize the colors.
      */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /*
    Only tag the image with the new type once every conversion succeeded.
  */
  image->type=type;
  return(MagickTrue);
}
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t I m a g e B o 
%  GetImageBoundingBox() returns the bounding box of an image canvas.
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Fraction of non-background pixels seen along each image edge; the same
  structure is reused in GetEdgeBoundingBox() as a running count of rows and
  columns trimmed from each side (stored as doubles, cast to ssize_t for
  offsets).
*/
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;

/*
  Return the fraction (0.0 .. 1.0) of pixels in the width x height strip,
  anchored by gravity at (x_offset,y_offset), that differ (fuzzily) from the
  corner pixel selected by the same gravity.  Returns 0.0 if the strip cannot
  be cropped.
*/
static double GetEdgeBackgroundFactor(const Image * image,
  const CacheView * image_view, const GravityType gravity, const size_t width,
  const size_t height, const ssize_t x_offset, const ssize_t y_offset,
  ExceptionInfo * exception)
{
  CacheView
    * edge_view;

  double
    factor;

  Image
    * edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  register const Quantum
    * p;

  ssize_t
    y;

  /*
    Pick the background reference pixel from the corner implied by gravity.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p = GetCacheViewVirtualPixels(image_view, 0, 0, 1, 1, exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1,
        0, 1, 1, exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1,
        (ssize_t) image->rows - 1, 1, 1, exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, 0,
        (ssize_t) image->rows - 1, 1, 1, exception);
      break;
    }
  }
  GetPixelInfoPixel(image, p, &background);
  /* Crop the requested edge strip and count non-background pixels in it. */
  edge_geometry.width = width;
  edge_geometry.height = height;
  edge_geometry.x = x_offset;
  edge_geometry.y = y_offset;
  GravityAdjustGeometry(image->columns, image->rows, gravity, &edge_geometry);
  edge_image = CropImage(image, &edge_geometry, exception);
  if (edge_image == (Image *) NULL)
    return (0.0);
  factor = 0.0;
  edge_view = AcquireVirtualCacheView(edge_image, exception);
  for (y = 0; y < (ssize_t) edge_image->rows; y++) {
    register ssize_t
      x;

    p = GetCacheViewVirtualPixels(edge_view, 0, y, edge_image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL)
      break;
    for (x = 0; x < (ssize_t) edge_image->columns; x++) {
      GetPixelInfoPixel(edge_image, p, &pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel, &background) == MagickFalse)
        factor++;
      p += GetPixelChannels(edge_image);
    }
  }
  /* Normalize the count to a fraction of the strip's pixel total. */
  factor /= ((double)edge_image->columns * edge_image->rows);
  edge_view = DestroyCacheView(edge_view);
  edge_image = DestroyImage(edge_image);
  return (factor);
}

/* Smallest of the four per-edge background factors. */
static inline double GetMinEdgeBackgroundFactor(const EdgeInfo * edge)
{
  double
    factor;

  factor = MagickMin(MagickMin(MagickMin(edge->left, edge->right),
    edge->top), edge->bottom);
  return (factor);
}

/*
  Bounding box computed by iteratively trimming edges whose background
  fraction is below the "trim:percent-background" threshold.  Used by
  GetImageBoundingBox() when that artifact is set.
*/
static RectangleInfo GetEdgeBoundingBox(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    * edge_view;

  const char
    *artifact;

  double
    background_factor,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    * edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s",
      image->filename);
  SetGeometry(image, &bounds);
  /* Work on a clone with a reset page so virtual offsets don't interfere. */
  edge_image = CloneImage(image, 0, 0, MagickTrue, exception);
  if (edge_image == (Image *) NULL)
    return (bounds);
  (void)ParseAbsoluteGeometry("0x0+0+0", &edge_image->page);
  memset(&vertex, 0, sizeof(vertex));
  edge_view = AcquireVirtualCacheView(edge_image, exception);
  /* Initial background factor for each full edge (1-pixel-wide strips). */
  edge.left = GetEdgeBackgroundFactor(edge_image, edge_view, WestGravity,
    1, 0, 0, 0, exception);
  edge.right = GetEdgeBackgroundFactor(edge_image, edge_view, EastGravity,
    1, 0, 0, 0, exception);
  edge.top = GetEdgeBackgroundFactor(edge_image, edge_view, NorthGravity,
    0, 1, 0, 0, exception);
  edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view, SouthGravity,
    0, 1, 0, 0, exception);
  /* Threshold: artifact gives percent background; convert to a minimum
     non-background fraction, clamped to (MagickEpsilon, 1.0]. */
  percent_background = 1.0;
  artifact = GetImageArtifact(edge_image, "trim:percent-background");
  if (artifact != (const char *)NULL)
    percent_background = StringToDouble(artifact, (char **)NULL) / 100.0;
  percent_background = MagickMin(MagickMax(1.0 - percent_background,
    MagickEpsilon), 1.0);
  background_factor = GetMinEdgeBackgroundFactor(&edge);
  /* Repeatedly trim whichever edge currently has the minimum factor,
     re-measuring the affected edges after each trim. */
  for (; background_factor < percent_background;
       background_factor = GetMinEdgeBackgroundFactor(&edge)) {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left - background_factor) < MagickEpsilon) {
      /*
        Trim left edge.
      */
      vertex.left++;
      bounds.width--;
      edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
        SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.bottom, exception);
      continue;
    }
    if (fabs(edge.right - background_factor) < MagickEpsilon) {
      /*
        Trim right edge.
      */
      vertex.right++;
      bounds.width--;
      edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
        (ssize_t) vertex.top, exception);
      edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
        SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.bottom, exception);
      continue;
    }
    if (fabs(edge.top - background_factor) < MagickEpsilon) {
      /*
        Trim top edge.
      */
      vertex.top++;
      bounds.height--;
      edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
        (ssize_t) vertex.top, exception);
      edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      continue;
    }
    if (fabs(edge.bottom - background_factor) < MagickEpsilon) {
      /*
        Trim bottom edge.
      */
      vertex.bottom++;
      bounds.height--;
      edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
        (ssize_t) vertex.top, exception);
      edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
        NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
        (ssize_t) vertex.top, exception);
      edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
        SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
        (ssize_t) vertex.bottom, exception);
      continue;
    }
  }
  edge_view = DestroyCacheView(edge_view);
  edge_image = DestroyImage(edge_image);
  bounds.x = (ssize_t) vertex.left;
  bounds.y = (ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning,
      "GeometryDoesNotContainImage", "`%s'", image->filename);
  return (bounds);
}

/*
  Returns the bounding box of the image canvas: the smallest rectangle that
  contains all pixels differing (fuzzily) from the corner reference pixels.
  Delegates to GetEdgeBoundingBox() when the "trim:percent-background"
  artifact is set.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    * image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const Quantum
    * p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s",
      image->filename);
  artifact = GetImageArtifact(image, "trim:percent-background");
  if (artifact != (const char *)NULL)
    return (GetEdgeBoundingBox(image, exception));
  /* Start with an "inverted" box so the scan below can grow it. */
  bounds.width = 0;
  bounds.height = 0;
  bounds.x = (ssize_t) image->columns;
  bounds.y = (ssize_t) image->rows;
  /* Reference pixels: top-left (target[0]), top-right (target[1]),
     bottom-left (target[2]). */
  GetPixelInfo(image, &target[0]);
  image_view = AcquireVirtualCacheView(image, exception);
  p = GetCacheViewVirtualPixels(image_view, 0, 0, 1, 1, exception);
  if (p == (const Quantum *)NULL) {
    image_view = DestroyCacheView(image_view);
    return (bounds);
  }
  GetPixelInfoPixel(image, p, &target[0]);
  GetPixelInfo(image, &target[1]);
  p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1,
    0, 1, 1, exception);
  if (p != (const Quantum *)NULL)
    GetPixelInfoPixel(image, p, &target[1]);
  GetPixelInfo(image, &target[2]);
  p = GetCacheViewVirtualPixels(image_view, 0, (ssize_t) image->rows - 1,
    1, 1, exception);
  if (p != (const Quantum *)NULL)
    GetPixelInfoPixel(image, p, &target[2]);
  status = MagickTrue;
  GetPixelInfo(image, &zero);
  for (y = 0; y < (ssize_t) image->rows; y++) {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    register const Quantum
      * magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    bounding_box = bounds;
    p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL) {
      status = MagickFalse;
      continue;
    }
    pixel = zero;
    for (x = 0; x < (ssize_t) image->columns; x++) {
      GetPixelInfoPixel(image, p, &pixel);
      /* Expand the row-local box whenever a pixel differs from the
         corresponding corner reference. */
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[0]) == MagickFalse))
        bounding_box.x = x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[1]) == MagickFalse))
        bounding_box.width = (size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[0]) == MagickFalse))
        bounding_box.y = y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[2]) == MagickFalse))
        bounding_box.height = (size_t) y;
      p += GetPixelChannels(image);
    }
    /* Merge the row-local box into the shared result. */
    {
      if (bounding_box.x < bounds.x)
        bounds.x = bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y = bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width = bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height = bounding_box.height;
    }
  }
  image_view = DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning,
      "GeometryDoesNotContainImage", "`%s'", image->filename);
  else {
    /* width/height were stored as max coordinates; convert to extents. */
    bounds.width -= (bounds.x - 1);
    bounds.height -= (bounds.y - 1);
  }
  return (bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    * image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    * current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s",
      image->filename);
  /* One depth accumulator per potential worker thread; this serial build
     only ever touches slot GetOpenMPThreadId(), presumably 0 here. */
  number_threads = (size_t) GetMagickResourceLimit(ThreadResource);
  current_depth = (size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed");
  status = MagickTrue;
  for (i = 0; i < (ssize_t) number_threads; i++)
    current_depth[i] = 1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait)) {
    /* Palette image without alpha: only the colormap needs scanning. */
    for (i = 0; i < (ssize_t) image->colors; i++) {
      const int
        id = GetOpenMPThreadId();

      /* Grow the candidate depth until all updatable channels of this
         colormap entry are exactly representable at it. */
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) {
        MagickBooleanType
          atDepth;

        QuantumAny
          range;

        atDepth = MagickTrue;
        range = GetQuantumRange(current_depth[id]);
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),
                range) == MagickFalse)
            atDepth = MagickFalse;
        if ((atDepth != MagickFalse) &&
            (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),
                range) == MagickFalse)
            atDepth = MagickFalse;
        if ((atDepth != MagickFalse) &&
            (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),
                range) == MagickFalse)
            atDepth = MagickFalse;
        if ((atDepth != MagickFalse))
          break;
        current_depth[id]++;
      }
    }
    /* Reduce the per-thread depths to the maximum. */
    depth = current_depth[0];
    for (i = 1; i < (ssize_t) number_threads; i++)
      if (depth < current_depth[i])
        depth = current_depth[i];
    current_depth = (size_t *) RelinquishMagickMemory(current_depth);
    return (depth);
  }
  image_view = AcquireVirtualCacheView(image, exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL * QuantumRange) <= MaxMap) {
    size_t
      * depth_map;

    /*
      Scale pixels to desired (optimized with depth map).
    */
    depth_map = (size_t *) AcquireQuantumMemory(MaxMap + 1,
      sizeof(*depth_map));
    if (depth_map == (size_t *) NULL)
      ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed");
    /* Precompute, for every possible quantum value, the minimum depth at
       which it round-trips through ScaleQuantumToAny/ScaleAnyToQuantum. */
    for (i = 0; i <= (ssize_t) MaxMap; i++) {
      unsigned int
        depth;

      for (depth = 1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) {
        Quantum
          pixel;

        QuantumAny
          range;

        range = GetQuantumRange(depth);
        pixel = (Quantum) i;
        if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel, range),
              range))
          break;
      }
      depth_map[i] = depth;
    }
    for (y = 0; y < (ssize_t) image->rows; y++) {
      const int
        id = GetOpenMPThreadId();

      register const Quantum
        * magick_restrict p;

      register ssize_t
        x;

      /* status==MagickFalse means the maximum depth was already reached;
         remaining rows are skipped (not an error). */
      if (status == MagickFalse)
        continue;
      p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
        exception);
      if (p == (const Quantum *)NULL)
        continue;
      for (x = 0; x < (ssize_t) image->columns; x++) {
        register ssize_t
          i;

        for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) {
          PixelChannel channel = GetPixelChannelChannel(image, i);
          PixelTrait traits = GetPixelChannelTraits(image, channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
            current_depth[id] = depth_map[ScaleQuantumToMap(p[i])];
        }
        p += GetPixelChannels(image);
      }
      if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
        status = MagickFalse;
    }
    image_view = DestroyCacheView(image_view);
    depth = current_depth[0];
    for (i = 1; i < (ssize_t) number_threads; i++)
      if (depth < current_depth[i])
        depth = current_depth[i];
    depth_map = (size_t *) RelinquishMagickMemory(depth_map);
    current_depth = (size_t *) RelinquishMagickMemory(current_depth);
    return (depth);
  }
#endif
  /*
    Compute pixel depth.
  */
  for (y = 0; y < (ssize_t) image->rows; y++) {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      * magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL)
      continue;
    for (x = 0; x < (ssize_t) image->columns; x++) {
      register ssize_t
        i;

      for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel = GetPixelChannelChannel(image, i);
        traits = GetPixelChannelTraits(image, channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Grow the depth until this sample round-trips exactly. */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) {
          QuantumAny
            range;

          range = GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i], range),
                range))
            break;
          current_depth[id]++;
        }
      }
      p += GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status = MagickFalse;
  }
  image_view = DestroyCacheView(image_view);
  depth = current_depth[0];
  for (i = 1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth = current_depth[i];
  current_depth = (size_t *) RelinquishMagickMemory(current_depth);
  return (depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse constrains the depth to a
%      maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image * image,
  const MagickBooleanType constrain)
{
  /* Legal quantum depths, in ascending order; a depth is rounded up to the
     first entry that can hold it (left unchanged if it exceeds them all). */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  size_t
    depth,
    i;

  depth=image->depth;
  for (i=0; i < (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */
MagickExport ImageType GetImageType(const Image * image)
{
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Classify from cached attributes only; no pixel scan is performed. */
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity
%  is either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport ImageType IdentifyImageGray(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *pixel_view;

  ImageType
    classification;

  ssize_t
    row;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* An image already classified gray or bi-level needs no pixel scan. */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /* Assume bi-level; demote to grayscale or undefined as pixels disagree. */
  classification=BilevelType;
  pixel_view=AcquireVirtualCacheView(image,exception);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    register const Quantum
      *q;

    register ssize_t
      col;

    q=GetCacheViewVirtualPixels(pixel_view,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (col=0; col < (ssize_t) image->columns; col++)
    {
      if (IsPixelGray(image,q) == MagickFalse)
        {
          classification=UndefinedType;
          break;
        }
      if ((classification == BilevelType) &&
          (IsPixelMonochrome(image,q) == MagickFalse))
        classification=GrayscaleType;
      q+=GetPixelChannels(image);
    }
    if (classification == UndefinedType)
      break;
  }
  pixel_view=DestroyCacheView(pixel_view);
  if ((classification == GrayscaleType) &&
      (image->alpha_trait != UndefinedPixelTrait))
    classification=GrayscaleAlphaType;
  return(classification);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the
%  image have the same red, green, and blue intensities and the intensity is
%  either 0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *pixel_view;

  MagickBooleanType
    is_bilevel;

  ssize_t
    row;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* An image already classified bi-level needs no pixel scan. */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Assume monochrome until a non-monochrome pixel is found. */
  is_bilevel=MagickTrue;
  pixel_view=AcquireVirtualCacheView(image,exception);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    register const Quantum
      *q;

    register ssize_t
      col;

    q=GetCacheViewVirtualPixels(pixel_view,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (col=0; col < (ssize_t) image->columns; col++)
    {
      if (IsPixelMonochrome(image,q) == MagickFalse)
        {
          is_bilevel=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
    if (is_bilevel == MagickFalse)
      break;
  }
  pixel_view=DestroyCacheView(pixel_view);
  return(is_bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport ImageType IdentifyImageType(const Image * image,
  ExceptionInfo * exception)
{
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Unlike GetImageType(), this inspects pixels to find the potential type. */
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
% */ MagickExport MagickBooleanType IsImageGray(const Image * image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return (MagickTrue); return (MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s I m a g e M o n o c h r o m e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsImageMonochrome() returns MagickTrue if type of the image is * bi-level. % % The format of the IsImageMonochrome method is: % % * MagickBooleanType IsImageMonochrome(const Image *image) % % A description * of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image * image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return (MagickTrue); return (MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s I m a g e O p a q u e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the * image have % an alpha value other than OpaqueAlpha (QuantumRange). % % * Will return true immediatally is alpha channel is not available. % % The * format of the IsImageOpaque method is: % % MagickBooleanType * IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % * A description of each parameter follows: % % o image: the image. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image * image, ExceptionInfo * exception) { CacheView * image_view; register const Quantum * p; register ssize_t x; ssize_t y; /* * Determine if image is opaque. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->alpha_trait == UndefinedPixelTrait) return (MagickTrue); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image, p) != OpaqueAlpha) break; p += GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view = DestroyCacheView(image_view); return (y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e D e p t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageDepth() sets the depth of the image. % % The format of * the SetImageDepth method is: % % MagickBooleanType * SetImageDepth(Image *image,const size_t depth, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o channel: the channel. % % o depth: the image depth. * % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType SetImageDepth(Image * image,
  const size_t depth, ExceptionInfo * exception)
{
  CacheView
    * image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "...");
  assert(image->signature == MagickCoreSignature);
  /* At or above the build's quantum depth no pixel data changes; just
     record the requested depth. */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH) {
    image->depth = depth;
    return (MagickTrue);
  }
  range = GetQuantumRange(depth);
  if (image->storage_class == PseudoClass) {
    register ssize_t
      i;

    /* Quantize each updatable channel of the colormap to the new depth by
       round-tripping through the reduced range. */
    for (i = 0; i < (ssize_t) image->colors; i++) {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red = (double)ScaleAnyToQuantum(ScaleQuantumToAny(
          ClampPixel(image->colormap[i].red), range), range);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green = (double)ScaleAnyToQuantum(ScaleQuantumToAny(
          ClampPixel(image->colormap[i].green), range), range);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue = (double)ScaleAnyToQuantum(ScaleQuantumToAny(
          ClampPixel(image->colormap[i].blue), range), range);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha = (double)ScaleAnyToQuantum(ScaleQuantumToAny(
          ClampPixel(image->colormap[i].alpha), range), range);
    }
  }
  status = MagickTrue;
  image_view = AcquireAuthenticCacheView(image, exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL * QuantumRange) <= MaxMap) {
    Quantum
      * depth_map;

    register ssize_t
      i;

    /*
      Scale pixels to desired (optimized with depth map).
    */
    depth_map = (Quantum *) AcquireQuantumMemory(MaxMap + 1,
      sizeof(*depth_map));
    if (depth_map == (Quantum *) NULL)
      ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed");
    /* Precompute the depth-reduced value for every possible quantum. */
    for (i = 0; i <= (ssize_t) MaxMap; i++)
      depth_map[i] = ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i, range),
        range);
    for (y = 0; y < (ssize_t) image->rows; y++) {
      register ssize_t
        x;

      register Quantum
        * magick_restrict q;

      if (status == MagickFalse)
        continue;
      q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
        exception);
      if (q == (Quantum *) NULL) {
        status = MagickFalse;
        continue;
      }
      for (x = 0; x < (ssize_t) image->columns; x++) {
        register ssize_t
          i;

        for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) {
          PixelChannel
            channel;

          PixelTrait
            traits;

          channel = GetPixelChannelChannel(image, i);
          traits = GetPixelChannelTraits(image, channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i] = depth_map[ScaleQuantumToMap(q[i])];
        }
        q += GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view, exception) ==
          MagickFalse) {
        status = MagickFalse;
        continue;
      }
    }
    image_view = DestroyCacheView(image_view);
    depth_map = (Quantum *) RelinquishMagickMemory(depth_map);
    /* Record the new depth only if every row was updated successfully. */
    if (status != MagickFalse)
      image->depth = depth;
    return (status);
  }
#endif
  /*
    Scale pixels to desired depth.
  */
  for (y = 0; y < (ssize_t) image->rows; y++) {
    register ssize_t
      x;

    register Quantum
      * magick_restrict q;

    if (status == MagickFalse)
      continue;
    q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (q == (Quantum *) NULL) {
      status = MagickFalse;
      continue;
    }
    for (x = 0; x < (ssize_t) image->columns; x++) {
      register ssize_t
        i;

      for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel = GetPixelChannelChannel(image, i);
        traits = GetPixelChannelTraits(image, channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i] = ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) q[i]), range), range);
      }
      q += GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) {
      status = MagickFalse;
      continue;
    }
  }
  image_view = DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth = depth;
  return (status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
%
% */

/*
  SetImageType() converts the image so it matches the requested ImageType:
  colorspace transform, optional quantization, and alpha-trait adjustment.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType SetImageType(Image * image,
  const ImageType type, ExceptionInfo * exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "...");
  assert(image->signature == MagickCoreSignature);
  status = MagickTrue;
  image_info = AcquireImageInfo();
  /* honor the image's dither preference (and any "dither" artifact) when
     quantizing below */
  image_info->dither = image->dither;
  artifact = GetImageArtifact(image, "dither");
  if (artifact != (const char *)NULL)
    (void)SetImageOption(image_info, "dither", artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray, normalized, then quantized to a 2-color gray palette. */
      status = TransformImageColorspace(image, GRAYColorspace, exception);
      (void)NormalizeImage(image, exception);
      quantize_info = AcquireQuantizeInfo(image_info);
      quantize_info->number_colors = 2;
      quantize_info->colorspace = GRAYColorspace;
      status = QuantizeImage(quantize_info, image, exception);
      quantize_info = DestroyQuantizeInfo(quantize_info);
      image->alpha_trait = UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status = TransformImageColorspace(image, GRAYColorspace, exception);
      image->alpha_trait = UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status = TransformImageColorspace(image, GRAYColorspace, exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
      break;
    }
    case PaletteType:
    {
      status = TransformImageColorspace(image, sRGBColorspace, exception);
      /* only quantize when the image does not already fit in 256 colors */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info = AcquireQuantizeInfo(image_info);
          quantize_info->number_colors = 256;
          status = QuantizeImage(quantize_info, image, exception);
          quantize_info = DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait = UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      /* Threshold the alpha channel to on/off, then build a palette. */
      status = TransformImageColorspace(image, sRGBColorspace, exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
      channel_mask = SetImageChannelMask(image, AlphaChannel);
      (void)BilevelImage(image, (double)QuantumRange / 2.0, exception);
      (void)SetImageChannelMask(image, channel_mask);
      quantize_info = AcquireQuantizeInfo(image_info);
      status = QuantizeImage(quantize_info, image, exception);
      quantize_info = DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status = TransformImageColorspace(image, sRGBColorspace, exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
      quantize_info = AcquireQuantizeInfo(image_info);
      quantize_info->colorspace = TransparentColorspace;
      status = QuantizeImage(quantize_info, image, exception);
      quantize_info = DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status = TransformImageColorspace(image, sRGBColorspace, exception);
      if (image->storage_class != DirectClass)
        status = SetImageStorageClass(image, DirectClass, exception);
      image->alpha_trait = UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status = TransformImageColorspace(image, sRGBColorspace, exception);
      if (image->storage_class != DirectClass)
        status = SetImageStorageClass(image, DirectClass, exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
      break;
    }
    case ColorSeparationType:
    {
      status = TransformImageColorspace(image, CMYKColorspace, exception);
      if (image->storage_class != DirectClass)
        status = SetImageStorageClass(image, DirectClass, exception);
      image->alpha_trait = UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status = TransformImageColorspace(image, CMYKColorspace, exception);
      if (image->storage_class != DirectClass)
        status = SetImageStorageClass(image, DirectClass, exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status = SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info = DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return (status);
  image->type = type;
  return (MagickTrue);
}
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t I m a g e B o 
unding Box (banner continued)
%
*/

/* Per-edge fraction of non-background pixels (0.0 .. 1.0). */
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;

/*
  GetEdgeBackgroundFactor() returns the fraction of pixels along one edge
  strip of `image' that differ (fuzzily) from the corner background color
  implied by `gravity'.  Returns 0.0 if the edge strip cannot be cropped.
*/
static double GetEdgeBackgroundFactor(const Image * image,
  const CacheView * image_view, const GravityType gravity, const size_t width,
  const size_t height, const ssize_t x_offset, const ssize_t y_offset,
  ExceptionInfo * exception)
{
  CacheView
    *edge_view;

  double
    factor;

  Image
    *edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  register const Quantum
    *p;

  ssize_t
    y;

  /*
    Determine the percent of image background for this edge: sample the
    corner pixel selected by `gravity' as the reference background color.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p = GetCacheViewVirtualPixels(image_view, 0, 0, 1, 1, exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1,
        0, 1, 1, exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1,
        (ssize_t) image->rows - 1, 1, 1, exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p = GetCacheViewVirtualPixels(image_view, 0, (ssize_t) image->rows - 1,
        1, 1, exception);
      break;
    }
  }
  GetPixelInfoPixel(image, p, &background);
  /* Crop the one-pixel-wide/high edge strip positioned by gravity. */
  edge_geometry.width = width;
  edge_geometry.height = height;
  edge_geometry.x = x_offset;
  edge_geometry.y = y_offset;
  GravityAdjustGeometry(image->columns, image->rows, gravity, &edge_geometry);
  edge_image = CropImage(image, &edge_geometry, exception);
  if (edge_image == (Image *) NULL)
    return (0.0);
  factor = 0.0;
  edge_view = AcquireVirtualCacheView(edge_image, exception);
  for (y = 0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    p = GetCacheViewVirtualPixels(edge_view, 0, y, edge_image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL)
      break;
    for (x = 0; x < (ssize_t) edge_image->columns; x++)
    {
      GetPixelInfoPixel(edge_image, p, &pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel, &background) == MagickFalse)
        factor++;  /* pixel does not match the background */
      p += GetPixelChannels(edge_image);
    }
  }
  factor /= ((double)edge_image->columns * edge_image->rows);
  edge_view = DestroyCacheView(edge_view);
  edge_image = DestroyImage(edge_image);
  return (factor);
}

/* Smallest of the four edge background factors. */
static inline double GetMinEdgeBackgroundFactor(const EdgeInfo * edge)
{
  double
    factor;

  factor = MagickMin(MagickMin(MagickMin(edge->left, edge->right), edge->top),
    edge->bottom);
  return (factor);
}

/*
  GetEdgeBoundingBox() computes the bounding box by repeatedly shaving the
  edge whose background factor equals the current minimum, until every edge
  exceeds the "trim:percent-background" threshold.
*/
static RectangleInfo GetEdgeBoundingBox(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_factor,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  SetGeometry(image, &bounds);
  edge_image = CloneImage(image, 0, 0, MagickTrue, exception);
  if (edge_image == (Image *) NULL)
    return (bounds);
  (void)ParseAbsoluteGeometry("0x0+0+0", &edge_image->page);
  memset(&vertex, 0, sizeof(vertex));
  edge_view = AcquireVirtualCacheView(edge_image, exception);
  edge.left = GetEdgeBackgroundFactor(edge_image, edge_view, WestGravity,
    1, 0, 0, 0, exception);
  edge.right = GetEdgeBackgroundFactor(edge_image, edge_view, EastGravity,
    1, 0, 0, 0, exception);
  edge.top = GetEdgeBackgroundFactor(edge_image, edge_view, NorthGravity,
    0, 1, 0, 0, exception);
  edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view, SouthGravity,
    0, 1, 0, 0, exception);
  percent_background = 1.0;
  artifact = GetImageArtifact(edge_image, "trim:percent-background");
  if (artifact != (const char *)NULL)
    percent_background = StringToDouble(artifact, (char **)NULL) / 100.0;
  /* clamp threshold into (MagickEpsilon, 1.0] */
  percent_background = MagickMin(MagickMax(1.0 - percent_background,
    MagickEpsilon), 1.0);
  background_factor = GetMinEdgeBackgroundFactor(&edge);
  for (; background_factor < percent_background;
       background_factor = GetMinEdgeBackgroundFactor(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;  /* image fully trimmed away */
    if (fabs(edge.left - background_factor) < MagickEpsilon)
      {
        /*
          Trim left edge; recompute the factors it affects.
        */
        vertex.left++;
        bounds.width--;
        edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
          SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.bottom, exception);
        continue;
      }
    if (fabs(edge.right - background_factor) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
          (ssize_t) vertex.top, exception);
        edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
          SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.bottom, exception);
        continue;
      }
    if (fabs(edge.top - background_factor) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
          (ssize_t) vertex.top, exception);
        edge.top = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        continue;
      }
    if (fabs(edge.bottom - background_factor) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthWestGravity, 1, bounds.height, (ssize_t) vertex.left,
          (ssize_t) vertex.top, exception);
        edge.right = GetEdgeBackgroundFactor(edge_image, edge_view,
          NorthEastGravity, 1, bounds.height, (ssize_t) vertex.right,
          (ssize_t) vertex.top, exception);
        edge.bottom = GetEdgeBackgroundFactor(edge_image, edge_view,
          SouthWestGravity, bounds.width, 1, (ssize_t) vertex.left,
          (ssize_t) vertex.bottom, exception);
        continue;
      }
  }
  edge_view = DestroyCacheView(edge_view);
  edge_image = DestroyImage(edge_image);
  bounds.x = (ssize_t) vertex.left;
  bounds.y = (ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning,
      "GeometryDoesNotContainImage", "`%s'", image->filename);
  return (bounds);
}

/*
  GetImageBoundingBox() returns the bounding box of an image canvas,
  using the three corner pixels as background references.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  /* a "trim:percent-background" artifact selects the edge-shaving variant */
  artifact = GetImageArtifact(image, "trim:percent-background");
  if (artifact != (const char *)NULL)
    return (GetEdgeBoundingBox(image, exception));
  bounds.width = 0;
  bounds.height = 0;
  bounds.x = (ssize_t) image->columns;
  bounds.y = (ssize_t) image->rows;
  GetPixelInfo(image, &target[0]);
  image_view = AcquireVirtualCacheView(image, exception);
  /* target[0]=top-left, target[1]=top-right, target[2]=bottom-left corner */
  p = GetCacheViewVirtualPixels(image_view, 0, 0, 1, 1, exception);
  if (p == (const Quantum *)NULL)
    {
      image_view = DestroyCacheView(image_view);
      return (bounds);
    }
  GetPixelInfoPixel(image, p, &target[0]);
  GetPixelInfo(image, &target[1]);
  p = GetCacheViewVirtualPixels(image_view, (ssize_t) image->columns - 1, 0,
    1, 1, exception);
  if (p != (const
Quantum *)NULL)
    GetPixelInfoPixel(image, p, &target[1]);
  GetPixelInfo(image, &target[2]);
  p = GetCacheViewVirtualPixels(image_view, 0, (ssize_t) image->rows - 1, 1,
    1, exception);
  if (p != (const Quantum *)NULL)
    GetPixelInfoPixel(image, p, &target[2]);
  status = MagickTrue;
  GetPixelInfo(image, &zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* snapshot the shared bounds under the same named critical section
       used for the merge below */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box = bounds;
    p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL)
      {
        status = MagickFalse;
        continue;
      }
    pixel = zero;
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image, p, &pixel);
      /* grow the row-local box whenever a pixel differs from the matching
         corner reference color */
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[0]) == MagickFalse))
        bounding_box.x = x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[1]) == MagickFalse))
        bounding_box.width = (size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[0]) == MagickFalse))
        bounding_box.y = y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel, &target[2]) == MagickFalse))
        bounding_box.height = (size_t) y;
      p += GetPixelChannels(image);
    }
    /* merge the row-local box into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x = bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y = bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width = bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height = bounding_box.height;
    }
  }
  image_view = DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning,
      "GeometryDoesNotContainImage", "`%s'", image->filename);
  else
    {
      /* width/height were accumulated as max x/y coordinates; convert to
         extents relative to the box origin */
      bounds.width -= (bounds.x - 1);
      bounds.height -= (bounds.y - 1);
    }
  return (bounds);
}

/*
  GetImageDepth() returns the minimum number of significant bits required
  to represent every updatable channel value of the image.
*/
MagickExport size_t GetImageDepth(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  /* one depth accumulator per potential worker thread */
  number_threads = (size_t) GetMagickResourceLimit(ThreadResource);
  current_depth = (size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed");
  status = MagickTrue;
  for (i = 0; i < (ssize_t) number_threads; i++)
    current_depth[i] = 1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /* Colormapped image without alpha: the colormap alone determines the
         depth -- no need to scan the pixels. */
      for (i = 0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /* raise the depth until every updatable RGB channel round-trips */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth = MagickTrue;
          range = GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),
                range) == MagickFalse)
              atDepth = MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),
                range) == MagickFalse)
              atDepth = MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),
                range) == MagickFalse)
              atDepth = MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth = current_depth[0];
      for (i = 1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth = current_depth[i];
      current_depth = (size_t *) RelinquishMagickMemory(current_depth);
      return (depth);
    }
  image_view = AcquireVirtualCacheView(image, exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL * QuantumRange) <= MaxMap)
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        minimum depth for every possible quantum value.
      */
      depth_map = (size_t *) AcquireQuantumMemory(MaxMap + 1,
        sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed");
      for (i = 0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;  /* shadows the outer `depth' intentionally */

        for (depth = 1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range = GetQuantumRange(depth);
          pixel = (Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel, range),
              range))
            break;  /* value survives the round-trip at this depth */
        }
        depth_map[i] = depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y = 0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        /* `status' doubles as an early-out flag: once any thread reaches
           the maximum depth, remaining rows can be skipped */
        if (status == MagickFalse)
          continue;
        p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
          exception);
        if (p == (const Quantum *)NULL)
          continue;
        for (x = 0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i = 0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel = GetPixelChannelChannel(image, i);

            PixelTrait
              traits = GetPixelChannelTraits(image, channel);

            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id] = depth_map[ScaleQuantumToMap(p[i])];
          }
          p += GetPixelChannels(image);
        }
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status = MagickFalse;
      }
      image_view = DestroyCacheView(image_view);
      depth = current_depth[0];
      for (i = 1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth = current_depth[i];
      depth_map = (size_t *) RelinquishMagickMemory(depth_map);
      current_depth = (size_t *) RelinquishMagickMemory(current_depth);
      return (depth);
    }
#endif
  /*
    Compute pixel depth (general path: raise the per-thread depth until each
    channel value round-trips).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (p == (const Quantum *)NULL)
      continue;
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i = 0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel = GetPixelChannelChannel(image, i);
        traits = GetPixelChannelTraits(image, channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range = GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i], range), range))
            break;
          current_depth[id]++;
        }
      }
      p += GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status = MagickFalse;
  }
  image_view = DestroyCacheView(image_view);
  depth = current_depth[0];
  for (i = 1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth = current_depth[i];
  current_depth = (size_t *) RelinquishMagickMemory(current_depth);
  return (depth);
}

/*
  GetImageQuantumDepth() returns the depth of the image rounded to a legal
  quantum depth: 8, 16, or 32.

  The format of the GetImageQuantumDepth method is:

      size_t GetImageQuantumDepth(const Image *image,
        const MagickBooleanType constrain)

  o image: the image.
  (comment continues on the next source line)
%
%    o constrain: a value other than MagickFalse constrains the result to a
%      maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/

/*
  GetImageQuantumDepth() rounds image->depth up to the next legal quantum
  depth (8, 16, 32, or 64); depths above 64 are returned unchanged.  When
  `constrain' is set the result is capped at MAGICKCORE_QUANTUM_DEPTH.
*/
MagickExport size_t GetImageQuantumDepth(const Image * image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  size_t
    depth,
    n;

  depth = image->depth;
  /* snap to the smallest legal depth that can hold image->depth */
  for (n = 0; n < sizeof(legal_depths) / sizeof(legal_depths[0]); n++)
    if (depth <= legal_depths[n])
      {
        depth = legal_depths[n];
        break;
      }
  if (constrain != MagickFalse)
    depth = (size_t) MagickMin((double)depth,
      (double)MAGICKCORE_QUANTUM_DEPTH);
  return (depth);
}

/*
  GetImageType() returns the type of image: Bilevel, Grayscale, Palette,
  TrueColor, or ColorSeparation, each with or without an alpha variant.

  The format of the GetImageType method is:

      ImageType GetImageType(const Image *image)

  o image: the image.
  (comment continues on the next source line)
% */

/*
  GetImageType() classifies the image from its recorded attributes only
  (colorspace, type, storage class, alpha trait) -- no pixel scan.
*/
MagickExport ImageType GetImageType(const Image * image)
{
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha = (image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return (has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return (BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return (has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return (has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return (has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
  IdentifyImageGray() returns grayscale if all the pixels in the image have
  the same red, green, and blue intensities, and bi-level if the intensity is
  either 0 or QuantumRange; otherwise undefined is returned.

  The format of the IdentifyImageGray method is:

      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)

  o image: the image.
  o exception: return any errors or warnings in this structure.
  (comment continues on the next source line)
% */

/*
  IdentifyImageGray() scans the pixels and reports BilevelType,
  GrayscaleType, GrayscaleAlphaType, or UndefinedType (not gray).
*/
MagickExport ImageType IdentifyImageGray(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *canvas_view;

  ImageType
    classification;

  const Quantum
    *q;

  ssize_t
    column,
    row;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  /* trust a previously recorded gray classification */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return (image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return (UndefinedType);
  /* start optimistic: bi-level until a non-monochrome gray pixel demotes
     to grayscale, or a colored pixel demotes to undefined */
  classification = BilevelType;
  canvas_view = AcquireVirtualCacheView(image, exception);
  for (row = 0; row < (ssize_t) image->rows; row++)
  {
    q = GetCacheViewVirtualPixels(canvas_view, 0, row, image->columns, 1,
      exception);
    if (q == (const Quantum *)NULL)
      break;
    for (column = 0; column < (ssize_t) image->columns; column++)
    {
      if (IsPixelGray(image, q) == MagickFalse)
        {
          classification = UndefinedType;
          break;
        }
      if ((classification == BilevelType) &&
          (IsPixelMonochrome(image, q) == MagickFalse))
        classification = GrayscaleType;
      q += GetPixelChannels(image);
    }
    if (classification == UndefinedType)
      break;
  }
  canvas_view = DestroyCacheView(canvas_view);
  if ((classification == GrayscaleType) &&
      (image->alpha_trait != UndefinedPixelTrait))
    classification = GrayscaleAlphaType;
  return (classification);
}

/*
  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
  have the same red, green, and blue intensities and the intensity is either
  0 or QuantumRange.

  The format of the IdentifyImageMonochrome method is:

      MagickBooleanType IdentifyImageMonochrome(const Image *image,
        ExceptionInfo *exception)

  o image: the image.
  (comment continues on the next source line)
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  IdentifyImageMonochrome() scans the pixels and reports whether every pixel
  is strictly monochrome (pure black or pure white).
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image * image,
  ExceptionInfo * exception)
{
  CacheView
    *canvas_view;

  MagickBooleanType
    monochrome;

  const Quantum
    *q;

  ssize_t
    column,
    row;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  /* a recorded bi-level type short-circuits the scan */
  if (image->type == BilevelType)
    return (MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return (MagickFalse);
  monochrome = MagickTrue;
  canvas_view = AcquireVirtualCacheView(image, exception);
  for (row = 0; (monochrome != MagickFalse) && (row < (ssize_t) image->rows);
       row++)
  {
    q = GetCacheViewVirtualPixels(canvas_view, 0, row, image->columns, 1,
      exception);
    if (q == (const Quantum *)NULL)
      break;  /* unreadable row: stop scanning, keep current verdict */
    for (column = 0; column < (ssize_t) image->columns; column++)
    {
      if (IsPixelMonochrome(image, q) == MagickFalse)
        {
          monochrome = MagickFalse;
          break;
        }
      q += GetPixelChannels(image);
    }
  }
  canvas_view = DestroyCacheView(canvas_view);
  return (monochrome);
}

/*
  IdentifyImageType() returns the potential type of image: Bilevel,
  Grayscale, Palette, TrueColor, ColorSeparation (with or without alpha).
  To ensure the image type matches its potential, use SetImageType():

      (void) SetImageType(image,IdentifyImageType(image,exception),exception);

  The format of the IdentifyImageType method is:

      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)

  o image: the image.
  o exception: return any errors or warnings in this structure.
  (comment continues on the next source line)
% */

/*
  IdentifyImageType() classifies the image by inspecting its pixels (unlike
  GetImageType(), which trusts recorded attributes).
*/
MagickExport ImageType IdentifyImageType(const Image * image,
  ExceptionInfo * exception)
{
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  has_alpha = (image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return (has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  /* the checks are ordered from most to least specific */
  if (IdentifyImageMonochrome(image, exception) != MagickFalse)
    return (BilevelType);
  if (IdentifyImageGray(image, exception) != UndefinedType)
    return (has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image, exception) != MagickFalse)
    return (has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return (has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
  IsImageGray() returns MagickTrue if the type of the image is grayscale or
  bi-level.

  The format of the IsImageGray method is:

      MagickBooleanType IsImageGray(const Image *image)

  o image: the image.
  (comment continues on the next source line)
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* relies solely on the cached image->type; it does not inspect pixels */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  IsImageMonochrome() returns MagickTrue if type of the image is bi-level.

  The format of the IsImageMonochrome method is:

    MagickBooleanType IsImageMonochrome(const Image *image)

  A description of each parameter follows:

    o image: the image.
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* relies solely on the cached image->type; it does not inspect pixels */
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
  an alpha value other than OpaqueAlpha (QuantumRange).

  Will return true immediately if the alpha channel is not available.

  The format of the IsImageOpaque method is:

    MagickBooleanType IsImageOpaque(const Image *image,
      ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);  /* no alpha channel: trivially opaque */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;  /* pixel-cache failure also reports "not opaque" below */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;  /* translucent pixel found */
      p+=GetPixelChannels(image);
    }
    if (x < (ssize_t) image->columns)
      break;  /* inner loop exited early: image is not opaque */
  }
  image_view=DestroyCacheView(image_view);
  /* a complete scan (y reached image->rows) means every pixel was opaque */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
  SetImageDepth() sets the depth of the image.

  The format of the SetImageDepth method is:

    MagickBooleanType SetImageDepth(Image *image,const size_t depth,
      ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o channel: the channel.

    o depth: the image depth.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* requested depth meets or exceeds the build's quantum depth: no
         pixel requantization is required, just record the new depth */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      /* requantize the colormap entries in place, channel by channel */
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* precompute the requantization of every possible quantum value */
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;  /* cannot break out of a parallel loop */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;  /* channel not selected for update */
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* cannot break out of a parallel loop */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
  SetImageType() sets the type of image.  Choose from these types:

    Bilevel         Grayscale        GrayscaleMatte
    Palette         PaletteMatte     TrueColor
    TrueColorMatte  ColorSeparation  ColorSeparationMatte
    OptimizeType

  The format of the SetImageType method is:

    MagickBooleanType SetImageType(Image *image,const ImageType type,
      ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o type: Image type.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* honor a per-image "dither" artifact, if one was set */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* convert to gray, normalize, then quantize down to two colors */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* only quantize when the image cannot already be held in a colormap */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* threshold only the alpha channel, then quantize the colors */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  image->type=type;
  return(MagickTrue);
}
aux_cnn.h
#include <dirent.h> #include <sys/types.h> #include <cvpp/containers/matrix.h> #include <cvpp/containers/vector.h> #include <cvpp/containers/image.h> #include <cvpp/properties/pose.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> using namespace cvpp; Seq<String> get_files( const String& dir , const int& n = 0 ) { DIR *dp; struct dirent *dirp; Seq<String> files; if( ( dp = opendir( dir.c_str() ) ) == NULL ) disp( "Error Opening" , dir ); while( ( dirp = readdir( dp ) ) != NULL) { String file( dirp->d_name ); if( file[ file.size() - 4 ] == '.' ) files.push_back( dir + file ); } closedir( dp ); std::sort( files.begin() , files.end() ); if( n > 0 ) files.resize( n ); return files; } Matf load_vel2cam( const String& file ) { String line; std::ifstream infile( file + "/calib_velo_to_cam.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } Matf load_imu2vel( const String& file ) { String line; std::ifstream infile( file + "/calib_imu_to_velo.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } void load_cam2cam( const String& dir , Matf& K , Matf& D , Matf& R , Matf& P ) { String file = dir + "/calib_cam_to_cam.txt"; String line; std::ifstream infile( file ); float k[9] , d[5] , r[9] , p[12]; while( std::getline( infile , line ) ) { if( line.substr(0,4).compare( "K_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , k , 
' ' ); if( line.substr(0,4).compare( "D_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , d , ' ' ); if( line.substr(0,9).compare( "R_rect_00" ) == 0 ) tokenFloat( line.substr(10).c_str() , r , ' ' ); if( line.substr(0,9).compare( "P_rect_02" ) == 0 ) tokenFloat( line.substr(10).c_str() , p , ' ' ); } K.reset( 3 , 3 ); forLOOPij( K.r() , K.c() ) K(i,j) = k[ i * K.c() + j ]; D.reset( 5 ); forLOOPi( D.r() ) D(i) = d[ i ]; R.reset( 4 , 4 ).setIdentity(); forLOOPij( 3 , 3 ) R(i,j) = r[ i * 3 + j ]; R.blu(3) = R.blu(3).t(); P.reset( 3 , 4 ); forLOOPij( P.r() , P.c() ) P(i,j) = p[ i * P.c() + j ]; P = P.t(); } SeqMatd load_vel( const Seq<String>& files ) { int n = files.size(); SeqMatd vels( n ); int base = 1000000; float *data = (float*)malloc( base * sizeof(float) ); forLOOPi( n ) { float *px = data + 0 , *py = data + 1; float *pz = data + 2 , *pr = data + 3; FILE *stream; stream = fopen( files[i].c_str() , "rb" ); int num = fread( data , sizeof(float) , base , stream ) / 4; vels[i].reset( num , 4 ); forLOOPj( num ) { vels[i].row(j) << *px , *py , *pz , *pr ; px += 4 ; py += 4 ; pz += 4 ; pr += 4 ; } fclose( stream ); } return vels; } SeqImg3c load_img( const Seq<String>& files ) { int n = files.size(); SeqImg3c imgs( n ); #pragma omp parallel for forLOOPi( n ) { imgs[i].load( files[i] ); } return imgs; } SeqPosef load_pos( const Seq<String>& files ) { int n = files.size(); Matf data( n , 30 ); forLOOPi( n ) { float vals[30]; std::ifstream infile( files[i] ); String line; while( std::getline( infile , line ) ) tokenFloat( ( ' ' + line ).c_str() , vals , ' ' ); forLOOPj( data.c() ) data(i,j) = vals[j]; } float lat0 = data(0,0); float r = 6378137 , s = std::cos( lat0 * PI / 180.0 ); float sr = s * r; Matf xyz( n , 6 ); forLOOPi( xyz.r() ) { float lat = data(i,0) , lon = data(i,1); float z = data(i,2) , r = data(i,3) , p = data(i,4) , w = data(i,5); float x = sr * PI * lon / 180.0; float y = sr * std::log( std::tan( PI * ( 90.0 + lat ) / 360.0 ) ); xyz.row(i) << x , y , z 
, r , p , w; } Matf off = xyz.cl(3).r(0).clone(); xyz.cl(3) -= off; SeqPosef poses( n ); forLOOPi( poses.size() ) poses[i].setPose( xyz.r(i) ); return poses; } void filter_img( Matf& X , const int& r , const int& c ) { Veci idx; forLOOPi( X.r() ) { if( X(i,2) > 0.0 ) if( X(i,0) > 0.0 && X(i,0) < c * X(i,2) && X(i,1) > 0.0 && X(i,1) < r * X(i,2) ) idx.insert( i ); }; idx.update(); X.SampleRows( idx ); } Matf color_pts( const Matf& uv , const Img3c& img ) { Matf clr( uv.r() , 3 ); forLOOPi( clr.r() ) { clr.row(i) << img( uv(i,1) , uv(i,0) , 2 ) , img( uv(i,1) , uv(i,0) , 1 ) , img( uv(i,1) , uv(i,0) , 0 ) ; } return clr / 255.0; } void process_frame( const SeqPosef& poses , const SeqMatd& vels , const int& t , const Matf& TRP , const Matf& iPRT ,const int& r , const int& c , Matf& vel , Matf& xyz , Matf& proj , Matf& uv , Matf& dep ) { vel = poses[t].o2w( vels[t].cl(3).toFloat() ); // proj = poses[t].w2o( vel ).appR1() * TRP; proj = vels[t].cl(3).toFloat().appR1() * TRP; filter_img( proj , r , c ); // dep = proj.c(2) , uv = proj.cl(2) / dep; // dep.AddRand( -0.5 , +0.5 ); // xyz = poses[t].o2w( ( ( uv.appR1() % dep ).appR1() * iPRT ).cl(3) ); dep = proj.c(2) , uv = proj.cl(2) / dep; dep.AddRand( 0 , +0.25 ); Matf xyz1 = poses[t].o2w( ( ( uv.appR1() % ( dep ) ).appR1() * iPRT ).cl(3) ); Matf xyz2 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.25 ) ).appR1() * iPRT ).cl(3) ); Matf xyz3 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.50 ) ).appR1() * iPRT ).cl(3) ); Matf xyz4 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.75 ) ).appR1() * iPRT ).cl(3) ); Matf xyz5 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.00 ) ).appR1() * iPRT ).cl(3) ); Matf xyz6 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.25 ) ).appR1() * iPRT ).cl(3) ); Matf xyz7 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.50 ) ).appR1() * iPRT ).cl(3) ); xyz = xyz1 | xyz2 | xyz3 | xyz4 | xyz5 | xyz6 | xyz7; } void create_images( const Matf& uv , const Matf& dep , Img1f& dspf , Img1c& dspc , const double& mlt = 3.0 ) { 
dspf.setVal( 0.0 ); forLOOPj( uv.r() ) { double val = mlt * dep(j); if( val > 255.0 ) val = 0.0; int v = std::floor( uv(j,1) ); int u = std::floor( uv(j,0) ); if( dspf(v,u) == 0 ) dspf(v,u) = val; else if( dspf(v,u) > val ) dspf(v,u) = val; } dspc = dspf.toUChar(); } void prep_dirs( const String& dir ) { String path; struct stat st = {0}; path = dir + "/proc"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/imgs"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dispc"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dispf"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/vel"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/xyz"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/uv"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dep"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); } void save_data( const String& suf , const String& dir , const String& path , const Img3c& img , const Img1f& dspf , const Img1c& dspc , const Matf& vel , const Matf& xyz , const Matf& uv , const Matf& dep ) { int nn = path.length() , n = 0; while( path[ nn - n ] != '/' ) n++; n--; String name = path.substr( nn - n , n -4 ); String sufname = suf + name; disp( sufname ); img.saveIMG( dir + "/proc/imgs/" + sufname + ".png" ); dspc.saveIMG( dir + "/proc/dispc/" + sufname + ".png" ); dspf.mat().saveBIN( dir + "/proc/dispf/" + sufname + ".bin" ); vel.saveBIN( dir + "/proc/vel/" + sufname + ".bin" ); xyz.saveBIN( dir + "/proc/xyz/" + sufname + ".bin" ); uv.saveBIN( dir + "/proc/uv/" + sufname + ".bin" ); dep.saveBIN( dir + "/proc/dep/" + sufname + ".bin" ); }
#include <dirent.h> #include <sys/types.h> #include <cvpp/containers/matrix.h> #include <cvpp/containers/vector.h> #include <cvpp/containers/image.h> #include <cvpp/properties/pose.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> using namespace cvpp; Seq < String > get_files(const String & dir, const int &n = 0) { DIR *dp; struct dirent *dirp; Seq < String > files; if ((dp = opendir(dir.c_str())) == NULL) disp("Error Opening", dir); while ((dirp = readdir(dp)) != NULL) { String file(dirp->d_name); if (file[file.size() - 4] == '.') files.push_back(dir + file); } closedir(dp); std: :sort(files.begin(), files.end()); if (n > 0) files.resize(n); return files; } Matf load_vel2cam(const String & file) { String line; std: :ifstream infile(file + "/calib_velo_to_cam.txt"); float R[9], t[3]; while (std: :getline(infile, line)) { if (line[0] == 'R' && line[1] == ':') tokenFloat(line.c_str(), R, ' '); if (line[0] == 'T' && line[1] == ':') tokenFloat(line.c_str(), t, ' '); } Matf T(4, 4); T.eig() << R[0], R[1], R[2], t[0], R[3], R[4], R[5], t[1], R[6], R[7], R[8], t[2], 0.0, 0.0, 0.0, 1.0; return T.t(); } Matf load_imu2vel(const String & file) { String line; std: :ifstream infile(file + "/calib_imu_to_velo.txt"); float R[9], t[3]; while (std: :getline(infile, line)) { if (line[0] == 'R' && line[1] == ':') tokenFloat(line.c_str(), R, ' '); if (line[0] == 'T' && line[1] == ':') tokenFloat(line.c_str(), t, ' '); } Matf T(4, 4); T.eig() << R[0], R[1], R[2], t[0], R[3], R[4], R[5], t[1], R[6], R[7], R[8], t[2], 0.0, 0.0, 0.0, 1.0; return T.t(); } void load_cam2cam(const String & dir, Matf & K, Matf & D, Matf & R, Matf & P) { String file = dir + "/calib_cam_to_cam.txt"; String line; std: :ifstream infile(file); float k[9], d[5], r[9], p[12]; while (std: :getline(infile, line)) { if (line.substr(0, 4).compare("K_02") == 0) tokenFloat(line.substr(5).c_str(), k, ' '); if (line.substr(0, 4).compare("D_02") == 0) tokenFloat(line.substr(5).c_str(), d, ' '); if 
(line.substr(0, 9).compare("R_rect_00") == 0) tokenFloat(line.substr(10).c_str(), r, ' '); if (line.substr(0, 9).compare("P_rect_02") == 0) tokenFloat(line.substr(10).c_str(), p, ' '); } K.reset(3, 3); forLOOPij(K.r(), K.c()) K(i, j) = k[i * K.c() + j]; D.reset(5); forLOOPi(D.r()) D(i) = d[i]; R.reset(4, 4).setIdentity(); forLOOPij(3, 3) R(i, j) = r[i * 3 + j]; R.blu(3) = R.blu(3).t(); P.reset(3, 4); forLOOPij(P.r(), P.c()) P(i, j) = p[i * P.c() + j]; P = P.t(); } SeqMatd load_vel(const Seq < String > &files) { int n = files.size(); SeqMatd vels(n); int base = 1000000; float *data = (float *)malloc(base * sizeof(float)); forLOOPi(n) { float *px = data + 0, *py = data + 1; float *pz = data + 2, *pr = data + 3; FILE *stream; stream = fopen(files[i].c_str(), "rb"); int num = fread(data, sizeof(float), base, stream) / 4; vels[i].reset(num, 4); forLOOPj(num) { vels[i].row(j) << *px, *py, *pz, *pr; px += 4; py += 4; pz += 4; pr += 4; } fclose(stream); } return vels; } SeqImg3c load_img(const Seq < String > &files) { int n = files.size(); SeqImg3c imgs(n); forLOOPi(n) { imgs[i].load(files[i]); } return imgs; } SeqPosef load_pos(const Seq < String > &files) { int n = files.size(); Matf data(n, 30); forLOOPi(n) { float vals[30]; std: : ifstream infile(files[i]); String line; while (std: :getline(infile, line)) tokenFloat((' ' + line).c_str(), vals, ' '); forLOOPj(data.c()) data(i, j) = vals[j]; } float lat0 = data(0, 0); float r = 6378137, s = std::cos(lat0 * PI / 180.0); float sr = s * r; Matf xyz(n, 6); forLOOPi(xyz.r()) { float lat = data(i, 0), lon = data(i, 1); float z = data(i, 2), r = data(i, 3), p = data(i, 4), w = data(i, 5); float x = sr * PI * lon / 180.0; float y = sr * std::log(std::tan(PI * (90.0 + lat) / 360.0)); xyz.row(i) << x, y, z, r, p, w; } Matf off = xyz.cl(3).r(0).clone(); xyz.cl(3) -= off; SeqPosef poses(n); forLOOPi(poses.size()) poses[i].setPose(xyz.r(i)); return poses; } void filter_img(Matf & X, const int &r, const int &c) { Veci idx; 
forLOOPi(X.r()) { if (X(i, 2) > 0.0) if (X(i, 0) > 0.0 && X(i, 0) < c * X(i, 2) && X(i, 1) > 0.0 && X(i, 1) < r * X(i, 2)) idx.insert(i); }; idx.update(); X.SampleRows(idx); } Matf color_pts(const Matf & uv, const Img3c & img) { Matf clr(uv.r(), 3); forLOOPi(clr.r()) { clr.row(i) << img(uv(i, 1), uv(i, 0), 2), img(uv(i, 1), uv(i, 0), 1), img(uv(i, 1), uv(i, 0), 0); } return clr / 255.0; } void process_frame(const SeqPosef & poses, const SeqMatd & vels, const int &t, const Matf & TRP, const Matf & iPRT, const int &r, const int &c, Matf & vel, Matf & xyz, Matf & proj, Matf & uv, Matf & dep) { vel = poses[t].o2w(vels[t].cl(3).toFloat()); //proj = poses[t].w2o(vel).appR1() * TRP; proj = vels[t].cl(3).toFloat().appR1() * TRP; filter_img(proj, r, c); //dep = proj.c(2), uv = proj.cl(2) / dep; //dep.AddRand(-0.5, +0.5); //xyz = poses[t].o2w(((uv.appR1() % dep).appR1() * iPRT).cl(3)); dep = proj.c(2), uv = proj.cl(2) / dep; dep.AddRand(0, +0.25); Matf xyz1 = poses[t].o2w(((uv.appR1() % (dep)).appR1() * iPRT).cl(3)); Matf xyz2 = poses[t].o2w(((uv.appR1() % (dep + 0.25)).appR1() * iPRT).cl(3)); Matf xyz3 = poses[t].o2w(((uv.appR1() % (dep + 0.50)).appR1() * iPRT).cl(3)); Matf xyz4 = poses[t].o2w(((uv.appR1() % (dep + 0.75)).appR1() * iPRT).cl(3)); Matf xyz5 = poses[t].o2w(((uv.appR1() % (dep + 1.00)).appR1() * iPRT).cl(3)); Matf xyz6 = poses[t].o2w(((uv.appR1() % (dep + 1.25)).appR1() * iPRT).cl(3)); Matf xyz7 = poses[t].o2w(((uv.appR1() % (dep + 1.50)).appR1() * iPRT).cl(3)); xyz = xyz1 | xyz2 | xyz3 | xyz4 | xyz5 | xyz6 | xyz7; } void create_images(const Matf & uv, const Matf & dep, Img1f & dspf, Img1c & dspc, const double &mlt = 3.0) { dspf.setVal(0.0); forLOOPj(uv.r()) { double val = mlt * dep(j); if (val > 255.0) val = 0.0; int v = std::floor(uv(j, 1)); int u = std::floor(uv(j, 0)); if (dspf(v, u) == 0) dspf(v, u) = val; else if (dspf(v, u) > val) dspf(v, u) = val; } dspc = dspf.toUChar(); } void prep_dirs(const String & dir) { String path; struct stat st = {0}; path = 
dir + "/proc"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/imgs"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dispc"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dispf"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/vel"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/xyz"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/uv"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dep"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); } void save_data(const String & suf, const String & dir, const String & path, const Img3c & img, const Img1f & dspf, const Img1c & dspc, const Matf & vel, const Matf & xyz, const Matf & uv, const Matf & dep) { int nn = path.length(), n = 0; while (path[nn - n] != '/') n++; n--; String name = path.substr(nn - n, n - 4); String sufname = suf + name; disp(sufname); img.saveIMG(dir + "/proc/imgs/" + sufname + ".png"); dspc.saveIMG(dir + "/proc/dispc/" + sufname + ".png"); dspf.mat().saveBIN(dir + "/proc/dispf/" + sufname + ".bin"); vel.saveBIN(dir + "/proc/vel/" + sufname + ".bin"); xyz.saveBIN(dir + "/proc/xyz/" + sufname + ".bin"); uv.saveBIN(dir + "/proc/uv/" + sufname + ".bin"); dep.saveBIN(dir + "/proc/dep/" + sufname + ".bin"); }
#include <dirent.h> #include <sys/types.h> #include <cvpp/containers/matrix.h> #include <cvpp/containers/vector.h> #include <cvpp/containers/image.h> #include <cvpp/properties/pose.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> using namespace cvpp; Seq < String > get_files(const String & dir, const int &n = 0) { DIR *dp; struct dirent *dirp; Seq < String > files; if ((dp = opendir(dir.c_str())) == NULL) disp("Error Opening", dir); while ((dirp = readdir(dp)) != NULL) { String file(dirp->d_name); if (file[file.size() - 4] == '.') files.push_back(dir + file); } closedir(dp); std: :sort(files.begin(), files.end()); if (n > 0) files.resize(n); return files; } Matf load_vel2cam(const String & file) { String line; std: :ifstream infile(file + "/calib_velo_to_cam.txt"); float R[9], t[3]; while (std: :getline(infile, line)) { if (line[0] == 'R' && line[1] == ':') tokenFloat(line.c_str(), R, ' '); if (line[0] == 'T' && line[1] == ':') tokenFloat(line.c_str(), t, ' '); } Matf T(4, 4); T.eig() << R[0], R[1], R[2], t[0], R[3], R[4], R[5], t[1], R[6], R[7], R[8], t[2], 0.0, 0.0, 0.0, 1.0; return T.t(); } Matf load_imu2vel(const String & file) { String line; std: :ifstream infile(file + "/calib_imu_to_velo.txt"); float R[9], t[3]; while (std: :getline(infile, line)) { if (line[0] == 'R' && line[1] == ':') tokenFloat(line.c_str(), R, ' '); if (line[0] == 'T' && line[1] == ':') tokenFloat(line.c_str(), t, ' '); } Matf T(4, 4); T.eig() << R[0], R[1], R[2], t[0], R[3], R[4], R[5], t[1], R[6], R[7], R[8], t[2], 0.0, 0.0, 0.0, 1.0; return T.t(); } void load_cam2cam(const String & dir, Matf & K, Matf & D, Matf & R, Matf & P) { String file = dir + "/calib_cam_to_cam.txt"; String line; std: :ifstream infile(file); float k[9], d[5], r[9], p[12]; while (std: :getline(infile, line)) { if (line.substr(0, 4).compare("K_02") == 0) tokenFloat(line.substr(5).c_str(), k, ' '); if (line.substr(0, 4).compare("D_02") == 0) tokenFloat(line.substr(5).c_str(), d, ' '); if 
(line.substr(0, 9).compare("R_rect_00") == 0) tokenFloat(line.substr(10).c_str(), r, ' '); if (line.substr(0, 9).compare("P_rect_02") == 0) tokenFloat(line.substr(10).c_str(), p, ' '); } K.reset(3, 3); forLOOPij(K.r(), K.c()) K(i, j) = k[i * K.c() + j]; D.reset(5); forLOOPi(D.r()) D(i) = d[i]; R.reset(4, 4).setIdentity(); forLOOPij(3, 3) R(i, j) = r[i * 3 + j]; R.blu(3) = R.blu(3).t(); P.reset(3, 4); forLOOPij(P.r(), P.c()) P(i, j) = p[i * P.c() + j]; P = P.t(); } SeqMatd load_vel(const Seq < String > &files) { int n = files.size(); SeqMatd vels(n); int base = 1000000; float *data = (float *)malloc(base * sizeof(float)); forLOOPi(n) { float *px = data + 0, *py = data + 1; float *pz = data + 2, *pr = data + 3; FILE *stream; stream = fopen(files[i].c_str(), "rb"); int num = fread(data, sizeof(float), base, stream) / 4; vels[i].reset(num, 4); forLOOPj(num) { vels[i].row(j) << *px, *py, *pz, *pr; px += 4; py += 4; pz += 4; pr += 4; } fclose(stream); } return vels; } SeqImg3c load_img(const Seq < String > &files) { int n = files.size(); SeqImg3c imgs(n); #pragma omp parallel for forLOOPi(n) { imgs[i].load(files[i]); } return imgs; } SeqPosef load_pos(const Seq < String > &files) { int n = files.size(); Matf data(n, 30); forLOOPi(n) { float vals[30]; std: : ifstream infile(files[i]); String line; while (std: :getline(infile, line)) tokenFloat((' ' + line).c_str(), vals, ' '); forLOOPj(data.c()) data(i, j) = vals[j]; } float lat0 = data(0, 0); float r = 6378137, s = std::cos(lat0 * PI / 180.0); float sr = s * r; Matf xyz(n, 6); forLOOPi(xyz.r()) { float lat = data(i, 0), lon = data(i, 1); float z = data(i, 2), r = data(i, 3), p = data(i, 4), w = data(i, 5); float x = sr * PI * lon / 180.0; float y = sr * std::log(std::tan(PI * (90.0 + lat) / 360.0)); xyz.row(i) << x, y, z, r, p, w; } Matf off = xyz.cl(3).r(0).clone(); xyz.cl(3) -= off; SeqPosef poses(n); forLOOPi(poses.size()) poses[i].setPose(xyz.r(i)); return poses; } void filter_img(Matf & X, const int &r, const int 
&c) { Veci idx; forLOOPi(X.r()) { if (X(i, 2) > 0.0) if (X(i, 0) > 0.0 && X(i, 0) < c * X(i, 2) && X(i, 1) > 0.0 && X(i, 1) < r * X(i, 2)) idx.insert(i); }; idx.update(); X.SampleRows(idx); } Matf color_pts(const Matf & uv, const Img3c & img) { Matf clr(uv.r(), 3); forLOOPi(clr.r()) { clr.row(i) << img(uv(i, 1), uv(i, 0), 2), img(uv(i, 1), uv(i, 0), 1), img(uv(i, 1), uv(i, 0), 0); } return clr / 255.0; } void process_frame(const SeqPosef & poses, const SeqMatd & vels, const int &t, const Matf & TRP, const Matf & iPRT, const int &r, const int &c, Matf & vel, Matf & xyz, Matf & proj, Matf & uv, Matf & dep) { vel = poses[t].o2w(vels[t].cl(3).toFloat()); //proj = poses[t].w2o(vel).appR1() * TRP; proj = vels[t].cl(3).toFloat().appR1() * TRP; filter_img(proj, r, c); //dep = proj.c(2), uv = proj.cl(2) / dep; //dep.AddRand(-0.5, +0.5); //xyz = poses[t].o2w(((uv.appR1() % dep).appR1() * iPRT).cl(3)); dep = proj.c(2), uv = proj.cl(2) / dep; dep.AddRand(0, +0.25); Matf xyz1 = poses[t].o2w(((uv.appR1() % (dep)).appR1() * iPRT).cl(3)); Matf xyz2 = poses[t].o2w(((uv.appR1() % (dep + 0.25)).appR1() * iPRT).cl(3)); Matf xyz3 = poses[t].o2w(((uv.appR1() % (dep + 0.50)).appR1() * iPRT).cl(3)); Matf xyz4 = poses[t].o2w(((uv.appR1() % (dep + 0.75)).appR1() * iPRT).cl(3)); Matf xyz5 = poses[t].o2w(((uv.appR1() % (dep + 1.00)).appR1() * iPRT).cl(3)); Matf xyz6 = poses[t].o2w(((uv.appR1() % (dep + 1.25)).appR1() * iPRT).cl(3)); Matf xyz7 = poses[t].o2w(((uv.appR1() % (dep + 1.50)).appR1() * iPRT).cl(3)); xyz = xyz1 | xyz2 | xyz3 | xyz4 | xyz5 | xyz6 | xyz7; } void create_images(const Matf & uv, const Matf & dep, Img1f & dspf, Img1c & dspc, const double &mlt = 3.0) { dspf.setVal(0.0); forLOOPj(uv.r()) { double val = mlt * dep(j); if (val > 255.0) val = 0.0; int v = std::floor(uv(j, 1)); int u = std::floor(uv(j, 0)); if (dspf(v, u) == 0) dspf(v, u) = val; else if (dspf(v, u) > val) dspf(v, u) = val; } dspc = dspf.toUChar(); } void prep_dirs(const String & dir) { String path; struct stat st 
= {0}; path = dir + "/proc"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/imgs"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dispc"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dispf"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/vel"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/xyz"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/uv"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); path = dir + "/proc/dep"; if (stat(path.c_str(), &st) == -1) mkdir(path.c_str(), 0700); } void save_data(const String & suf, const String & dir, const String & path, const Img3c & img, const Img1f & dspf, const Img1c & dspc, const Matf & vel, const Matf & xyz, const Matf & uv, const Matf & dep) { int nn = path.length(), n = 0; while (path[nn - n] != '/') n++; n--; String name = path.substr(nn - n, n - 4); String sufname = suf + name; disp(sufname); img.saveIMG(dir + "/proc/imgs/" + sufname + ".png"); dspc.saveIMG(dir + "/proc/dispc/" + sufname + ".png"); dspf.mat().saveBIN(dir + "/proc/dispf/" + sufname + ".bin"); vel.saveBIN(dir + "/proc/vel/" + sufname + ".bin"); xyz.saveBIN(dir + "/proc/xyz/" + sufname + ".bin"); uv.saveBIN(dir + "/proc/uv/" + sufname + ".bin"); dep.saveBIN(dir + "/proc/dep/" + sufname + ".bin"); }
omp_app.c
/* for affinity functions */ #define _GNU_SOURCE #include <sched.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <omp.h> #include <stdio.h> void debug_affinity(void); int main(int argc, char **argv) { printf("omp_num_procs: %d (available cpus)\n", omp_get_num_procs()); printf("omp_max_threads: %d (allowed threads)\n", omp_get_max_threads()); printf("omp_num_threads: %d (threads in current block)\n", omp_get_num_threads()); printf("omp_thread_num: %d (id of main thread)\n", omp_get_thread_num()); debug_affinity(); #pragma omp parallel printf("%d/%d thread ready\n", omp_get_thread_num(), omp_get_num_procs()); return 0; } void debug_affinity(void) { cpu_set_t *cs; int count, size, i, first; cs = CPU_ALLOC(CPU_SETSIZE); assert(cs != NULL); size = CPU_ALLOC_SIZE(CPU_SETSIZE); CPU_ZERO_S(size, cs); sched_getaffinity(0, size, cs); count = CPU_COUNT(cs); first = 1; printf("cpu affinity (%d count): ", count); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, cs)) { if (!first) printf(","); printf("%d", i); first = 0; } } printf("\n"); CPU_FREE(cs); }
/* for affinity functions */ #define _GNU_SOURCE #include <sched.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <omp.h> #include <stdio.h> void debug_affinity(void); int main(int argc, char **argv) { printf("omp_num_procs: %d (available cpus)\n", omp_get_num_procs()); printf("omp_max_threads: %d (allowed threads)\n", omp_get_max_threads()); printf("omp_num_threads: %d (threads in current block)\n", omp_get_num_threads()); printf("omp_thread_num: %d (id of main thread)\n", omp_get_thread_num()); debug_affinity(); printf("%d/%d thread ready\n", omp_get_thread_num(), omp_get_num_procs()); return 0; } void debug_affinity(void) { cpu_set_t *cs; int count, size, i, first; cs = CPU_ALLOC(CPU_SETSIZE); assert(cs != NULL); size = CPU_ALLOC_SIZE(CPU_SETSIZE); CPU_ZERO_S(size, cs); sched_getaffinity(0, size, cs); count = CPU_COUNT(cs); first = 1; printf("cpu affinity (%d count): ", count); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, cs)) { if (!first) printf(","); printf("%d", i); first = 0; } } printf("\n"); CPU_FREE(cs); }
/* for affinity functions */ #define _GNU_SOURCE #include <sched.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <omp.h> #include <stdio.h> void debug_affinity(void); int main(int argc, char **argv) { printf("omp_num_procs: %d (available cpus)\n", omp_get_num_procs()); printf("omp_max_threads: %d (allowed threads)\n", omp_get_max_threads()); printf("omp_num_threads: %d (threads in current block)\n", omp_get_num_threads()); printf("omp_thread_num: %d (id of main thread)\n", omp_get_thread_num()); debug_affinity(); #pragma omp parallel printf("%d/%d thread ready\n", omp_get_thread_num(), omp_get_num_procs()); return 0; } void debug_affinity(void) { cpu_set_t *cs; int count, size, i, first; cs = CPU_ALLOC(CPU_SETSIZE); assert(cs != NULL); size = CPU_ALLOC_SIZE(CPU_SETSIZE); CPU_ZERO_S(size, cs); sched_getaffinity(0, size, cs); count = CPU_COUNT(cs); first = 1; printf("cpu affinity (%d count): ", count); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, cs)) { if (!first) printf(","); printf("%d", i); first = 0; } } printf("\n"); CPU_FREE(cs); }
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta; ** opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also definate that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! 
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Oct 2004 SVG specification. */ if (Sca*Da + Dca*Sa <= Sa*Da) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa)); #else /* March 2009 SVG specification. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;  /* hue is normalized to [0,1); expand to the 6 sextants */
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  /* m restores the requested luma after the chroma contribution */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL: convert RGB channel values into normalized HCL
  (hue/chroma/luma) components.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,double *hue,double *chroma,double *luma)
{
  double
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (double *) NULL);
  assert(chroma != (double *) NULL);
  assert(luma != (double *) NULL);
  r=(double) red;
  g=(double) green;
  b=(double) blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(double) MagickMin(r,MagickMin(g,b));
  h=0.0;
  if (c == 0)
    h=0.0;
  else if (red == (MagickRealType) max)
    h=fmod((g-b)/c+6.0,6.0);
  else if (green == (MagickRealType) max)
    h=((b-r)/c)+2.0;
  else if (blue == (MagickRealType) max)
    h=((r-g)/c)+4.0;
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  In: Porter-Duff 'in' -- the source weighted by both alphas.
*/
static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa,
  const MagickRealType magick_unused(q),const MagickRealType Da)
{
  magick_unreferenced(q);
  return(Sa*p*Da);
}

static inline void CompositeIn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Sa,
    Da;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*Da;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*In(p->red,Sa,q->red,Da);
  composite->green=gamma*In(p->green,Sa,q->green,Da);
  composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*In(p->index,Sa,q->index,Da);
}

/*
  Lighten: keep whichever pixel is larger, composited over the other.
*/
static inline MagickRealType Lighten(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p > q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));    /* dst-over */
}

static inline void CompositeLighten(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method
    OR a greyscale version of a binary 'And'
    OR the 'Union' of pixel sets.
  */
  double
    gamma;

  if ( (channel & SyncChannels) != 0 )
    {
      composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
      gamma=1.0-QuantumScale*composite->opacity;
      gamma=PerceptibleReciprocal(gamma);
      composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
      composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
      composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Lighten(p->index,p->opacity,q->index,
          q->opacity);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=MagickMin(p->opacity,q->opacity);
      if ( (channel & RedChannel) != 0 )
        composite->red=MagickMax(p->red,q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=MagickMax(p->green,q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=MagickMax(p->blue,q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=MagickMax(p->index,q->index);
    }
}

static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
Otherwise use Intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 )
    {
      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;
      Da=1.0-QuantumScale*q->opacity;
      /* copy whichever whole pixel has the larger alpha-weighted intensity */
      *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ?
        *p : *q;
    }
  else
    {
      int
        from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));

      if ( (channel & AlphaChannel) != 0 )
        composite->opacity = from_p ? p->opacity : q->opacity;
      if ( (channel & RedChannel) != 0 )
        composite->red = from_p ? p->red : q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green = from_p ? p->green : q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue = from_p ? p->blue : q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = from_p ? p->index : q->index;
    }
}

#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula

      f(Sc,Dc) = Sc + Dc
      Dca' = Sca + Dca
  */
  return(Sca+Dca);
}
#endif

static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}

static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Sc + Dc - 1
  */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

      f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in "compose:args" image artifact.

      A = a->rho,  B = a->sigma,  C = a->xi,  D = a->psi

    Applying the SVG transparency formula (see above), we get...

      Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

      Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
        Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
        MagickEpsilon : gamma);
      composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
        q->red*Da,Da,args);
      composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,
        QuantumScale*q->green*Da,Da,args);
      composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,
        QuantumScale*q->blue*Da,Da,args);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
          QuantumScale*q->index*Da,Da,args);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*
          Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*
          Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,
            args);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*
          Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*
          Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,
            args);
    }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      /*
        NOTE: "Plus" does not use 'over' alpha-blending but uses a
        special 'plus' form of alpha-blending.  It is the ONLY mathematical
        operator to do this.  This is what makes it different to the
        otherwise equivalent "LinearDodge" composition method.

        Note however that color channels are still affected by the alpha
        channel as a result of the blending, making it just as useless for
        independent channel maths, just like all other mathematical
        composition methods.

        As such the removal of the 'sync' flag is still a useful convention.
The MagickPixelCompositePlus() function is defined in
        "composite-private.h" so it can also be used for Image Blending.
      */
      MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=p->opacity+q->opacity-QuantumRange;
      if ( (channel & RedChannel) != 0 )
        composite->red=p->red+q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=p->green+q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=p->blue+q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=p->index+q->index;
    }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

      f(Sc,Dc) = Sc - Dc
  */
  magick_unreferenced(Da);
  return(Sca+Dca-2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=PerceptibleReciprocal(gamma);
      composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
      composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
      composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-(Sa-Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=p->red-q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=p->green-q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=p->blue-q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=p->index-q->index;
    }
}

/*
  ModulusAdd: add the channel values, wrapping the sum back into the
  [0,QuantumRange] interval, then alpha-blend the result.
*/
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      double
        gamma;

      MagickRealType
        Sa,
        Da;

      Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
      Da=1.0-QuantumScale*q->opacity;
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      /* NOTE(review): gamma is computed here but never applied to the
         channel results below -- confirm this is intended */
      gamma=PerceptibleReciprocal(gamma);
      composite->red=ModulusAdd(p->red,Sa,q->red,Da);
      composite->green=ModulusAdd(p->green,Sa,q->green,Da);
      composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=ModulusAdd(p->index,Sa,q->index,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
          1.0,QuantumRange-q->opacity,1.0);
      if ( (channel & RedChannel) != 0 )
        composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
      if ( (channel & GreenChannel) != 0 )
        composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
    }
}

/*
  ModulusSubtract: subtract the channel values, wrapping the difference back
  into the [0,QuantumRange] interval, then alpha-blend the result.
*/
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  while (pixel > QuantumRange)
pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      double
        gamma;

      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
      Da=1.0-QuantumScale*q->opacity;
      gamma = RoundToUnity(Sa+Da-Sa*Da);
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      /* NOTE(review): gamma is computed here but never applied to the
         channel results below -- confirm this is intended */
      gamma=PerceptibleReciprocal(gamma);
      composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
      composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
      composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-
          p->opacity,1.0,QuantumRange-q->opacity,1.0);
      if ( (channel & RedChannel) != 0 )
        composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
      if ( (channel & GreenChannel) != 0 )
        composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
    }
}

/*
  Multiply: SVG multiply blend on premultiplied channel values.
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
      composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
        q->red*Da,Da);
      composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
        q->green*Da,Da);
      composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
        q->blue*Da,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,
          QuantumScale*q->index*Da,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Sa*Da);
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumScale*p->red*q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumScale*p->green*q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumScale*p->blue*q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumScale*p->index*q->index;
    }
}

/*
  Out: Porter-Duff 'out' -- the source weighted by its alpha and the
  destination's transparency.
*/
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);
  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,const
MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* avoid division by a (near) zero destination alpha */
  return(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen: A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      Sa*=(MagickRealType) QuantumScale;
      Da*=(MagickRealType) QuantumScale; /* optimization */
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
      composite->red=gamma*Screen(p->red*Sa,q->red*Da);
      composite->green=gamma*Screen(p->green*Sa,q->green*Da);
      composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Screen(p->index*Sa,q->index*Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}

static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification -- was found to be incorrect
    See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
  */
  if (2.0*Sca < Sa)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (8.0*Dca <= Da)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
      Sca*(1.0-Da)+Dca*(1.0-Sa));
  return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
#else
  MagickRealType
    alpha,
    beta;

  /*
    New specification: March 2009 SVG specification.
  */
  alpha=Dca/Da;
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+
    Dca*(1.0-Sa);
  return(beta);
#endif
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if difference larger than threshold???
What use this is is completely unknown.
  The Opacity calculation appears to be inverted  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Xor: Porter-Duff 'xor' -- each pixel weighted by the other's transparency.
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage: convenience wrapper that composites with the default
  channel set; forwards to CompositeImageChannel().
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;
MagickOffsetType progress; MagickPixelPacket zero; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; /* Prepare composite image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); (void) SetImageColorspace(source_image,image->colorspace); GetMagickPixelPacket(image,&zero); canvas_image=(Image *) NULL; amount=0.5; canvas_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify canvas outside the overlaid region. 
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *source_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) CopyMagickMemory(q,p,source_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *) NULL)) (void) CopyMagickMemory(indexes,source_indexes, source_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. 
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register IndexPacket *magick_restrict canvas_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double) y_offset+y,&pixel); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } 
resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case 
MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. This Composition method is deprecated */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); #endif status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { 
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { canvas.red=(MagickRealType) GetPixelRed(p); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == 
TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case 
CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The image must hold authentic (DirectClass) pixels before tiling.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; the clone is switched to tile virtual-pixel mode so reads
    beyond its bounds wrap around automatically.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        General path: the compose method (or the presence of alpha) means a
        full composite is required for each tile; step over the canvas in
        texture-sized tiles and composite each one.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Report 100% completion once all tiles are composited. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Optimized path: a plain opaque copy suffices, so tile by memcpy'ing
    texture rows directly onto the canvas, one canvas row per iteration.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      The modulo wraps the source row vertically so the texture repeats
      down the canvas; horizontal repetition is handled by the x loop below.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      /* Clip the final tile so the copy never runs past the canvas edge. */
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) CopyMagickMemory(q,p,width*sizeof(*p));
      /* Colormap/black-channel indexes only exist when both images are CMYK. */
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          (void) CopyMagickMemory(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the OpenMP worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
/* * Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p o s i t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CompositeImageChannel() returns the second image composited onto * the first % at the specified offset, using the specified composite * method. 
%
%  The format of the CompositeImageChannel method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const CompositeOperator compose,Image *source_image,
%        const ssize_t x_offset,const ssize_t y_offset)
%      MagickBooleanType CompositeImageChannel(Image *image,
%        const ChannelType channel,const CompositeOperator compose,
%        Image *source_image,const ssize_t x_offset,const ssize_t y_offset)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition.
%
%    o channel: the channel.
%
%    o compose: This operator affects how the composite is applied to the
%      image.  The operators and how they are utilized are listed here:
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o source_image: the composite (source) image.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra controls from image meta-data in 'source_image' (artifacts):
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o "compose:outside-overlay"
%        Modify how the composition is to affect areas not directly covered
%        by the 'source_image' at the offset given.  Normally this is
%        dependent on the 'compose' method, especially Duff-Porter methods.
%
%        If set to "false" then disable all normal handling of pixels not
%        covered by the source_image.  Typically used for repeated tiling
%        of the source_image by the calling API.
%
%        Previous to IM v6.5.3-3 this was called "modify-outside-overlay".
%
*/

/*
  Programmers notes on the SVG specification.

  A Composition is defined by...
     Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
     Blending areas :  X = 1     for area of overlap, i.e. f(Sc,Dc)
                       Y = 1     for source preserved
                       Z = 1     for canvas preserved

  Conversion to transparency (then optimized):
     Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
     Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

  Where...
     Sca = Sc*Sa     normalized Source color divided by Source alpha
     Dca = Dc*Da     normalized Dest color divided by Dest alpha
     Dc' = Dca'/Da'  the desired color value for this channel.

  Da' appears in the formulas that follow as 'gamma': the resulting alpha
  value.

  Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
  the following optimizations:
     gamma = Sa+Da-Sa*Da;
     gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
     opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

  The above SVG definitions also dictate that Mathematical Composition
  methods should use an 'Over' blending mode for the Alpha Channel.  It
  however was not applied for composition modes of 'Plus', 'Minus', and the
  modulus versions of 'Add' and 'Subtract'.

  Mathematical operator changes to be applied from IM v6.7...

    1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2/ All mathematical compositions work as per the SVG specification with
       regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3/ When the special channel flag 'sync' (synchronize channel updates) is
       turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa, const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return (p * Sa + q * (1.0 - Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ composite->opacity = q->opacity; /* optimized Da = 1.0-Gamma */ composite->red = Atop(p->red, Sa, q->red, 1.0); composite->green = Atop(p->green, Sa, q->green, 1.0); composite->blue = Atop(p->blue, Sa, q->blue, 1.0); if (q->colorspace == CMYKColorspace) composite->index = Atop(p->index, Sa, q->index, 1.0); } /* * What is this Composition method for? Can't find any specification! WARNING * this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType intensity; intensity = MagickPixelIntensity(p); composite->red = QuantumScale * intensity * q->red; composite->green = QuantumScale * intensity * q->green; composite->blue = QuantumScale * intensity * q->blue; composite->opacity = (MagickRealType) QuantumScale *intensity * p->opacity; if (q->colorspace == CMYKColorspace) composite->index = QuantumScale * intensity * q->index; } static inline void CompositeClear(const MagickPixelPacket * q, MagickPixelPacket * composite) { composite->opacity = (MagickRealType) TransparentOpacity; composite->red = 0.0; composite->green = 0.0; composite->blue = 0.0; if (q->colorspace == CMYKColorspace) composite->index = 0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { #if 0 /* * Oct 2004 SVG specification. 
*/ if (Sca * Da + Dca * Sa <= Sa * Da) return (Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Sa * (Sca * Da + Dca * Sa - Sa * Da) / Sca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #else /* * March 2009 SVG specification. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca - Da) < MagickEpsilon)) return (Sa * Da + Dca * (1.0 - Sa)); if (Sca < MagickEpsilon) return (Dca * (1.0 - Sa)); return (Sa * Da - Sa * MagickMin(Da, (Da - Dca) * Sa / Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #endif } static inline void CompositeColorBurn(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * ColorBurn(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * ColorBurn(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * ColorBurn(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * ColorBurn(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * Oct 2004 SVG specification. */ if ((Sca * Da + Dca * Sa) >= Sa * Da) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Dca * Sa * Sa / (Sa - Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #if 0 /* * New specification, March 2009 SVG specification. This specification * was also wrong of non-overlap cases. 
*/ if ((fabs(Sca - Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return (Sca * (1.0 - Da)); if (fabs(Sca - Sa) < MagickEpsilon) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Sa * MagickMin(Da, Dca * Sa / (Sa - Sca))); #endif #if 0 /* * Working from first principles using the original formula: * * f(Sc,Dc) = Dc/(1-Sc) * * This works correctly! Looks like the 2004 model was right but just * required a extra condition for correct handling. */ if ((fabs(Sca - Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return (Sca * (1.0 - Da) + Dca * (1.0 - Sa)); if (fabs(Sca - Sa) < MagickEpsilon) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Dca * Sa * Sa / (Sa - Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * ColorDodge(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * ColorDodge(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * ColorDodge(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * ColorDodge(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha, const MagickRealType q, const MagickRealType beta) { if (p < q) return (MagickOver_(p, alpha, q, beta)); /* src-over */ return (MagickOver_(q, beta, p, alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { /* * Darken is equivalent to a 'Minimum' method OR a greyscale version of a * binary 'Or' OR the 'Intersection' of pixel sets. 
*/ double gamma; if ((channel & SyncChannels) != 0) { composite->opacity = QuantumScale * p->opacity * q->opacity; /* Over Blend */ gamma = 1.0 - QuantumScale * composite->opacity; gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Darken(p->red, p->opacity, q->red, q->opacity); composite->green = gamma * Darken(p->green, p->opacity, q->green, q->opacity); composite->blue = gamma * Darken(p->blue, p->opacity, q->blue, q->opacity); if (q->colorspace == CMYKColorspace) composite->index = gamma * Darken(p->index, p->opacity, q->index, q->opacity); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = MagickMax(p->opacity, q->opacity); if ((channel & RedChannel) != 0) composite->red = MagickMin(p->red, q->red); if ((channel & GreenChannel) != 0) composite->green = MagickMin(p->green, q->green); if ((channel & BlueChannel) != 0) composite->blue = MagickMin(p->blue, q->blue); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = MagickMin(p->index, q->index); } } static inline void CompositeDarkenIntensity(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { /* * Select the pixel based on the intensity level. If 'Sync' flag select * whole pixel based on alpha weighted intensity. Otherwise use intensity * only, but restrict copy according to channel. */ if ((channel & SyncChannels) != 0) { MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; Da = 1.0 - QuantumScale * q->opacity; *composite = (Sa * MagickPixelIntensity(p) < Da * MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ((channel & AlphaChannel) != 0) composite->opacity = from_p ? p->opacity : q->opacity; if ((channel & RedChannel) != 0) composite->red = from_p ? p->red : q->red; if ((channel & GreenChannel) != 0) composite->green = from_p ? 
p->green : q->green; if ((channel & BlueChannel) != 0) composite->blue = from_p ? p->blue : q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa, const MagickRealType q, const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return (Sa * p + Da * q - Sa * Da * 2.0 * MagickMin(p, q)); } static inline void CompositeDifference(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red = gamma * Difference(p->red, Sa, q->red, Da); composite->green = gamma * Difference(p->green, Sa, q->green, Da); composite->blue = gamma * Difference(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Difference(p->index, Sa, q->index, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange - fabs((double)(p->opacity - q->opacity)); if ((channel & RedChannel) != 0) composite->red = fabs((double)(p->red - q->red)); if ((channel & GreenChannel) != 0) composite->green = fabs((double)(p->green - q->green)); if ((channel & BlueChannel) != 0) composite->blue = fabs((double)(p->blue - q->blue)); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = fabs((double)(p->index - q->index)); } } static MagickRealType Divide(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * Divide Source by Destination * * f(Sc,Dc) = Sc / Dc * * But with appropriate handling for special case of Dc == 0 specifically so * that f(Black,Black)=Black and f(non-Black,Black)=White. It is * however also important to correctly do 'over' alpha blending which is * why the formula becomes so complex. 
*/ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return (Sca * (1.0 - Da) + Dca * (1.0 - Sa)); if (fabs(Dca) < MagickEpsilon) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Sca * Da * Da / Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeDivide(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * Divide(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * Divide(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * Divide(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Divide(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Divide(Sa, 1.0, Da, 1.0)); if ((channel & RedChannel) != 0) composite->red = QuantumRange * Divide(QuantumScale * p->red, 1.0, QuantumScale * q->red, 1.0); if ((channel & GreenChannel) != 0) composite->green = QuantumRange * Divide(QuantumScale * p->green, 1.0, QuantumScale * q->green, 1.0); if ((channel & BlueChannel) != 0) composite->blue = QuantumRange * Divide(QuantumScale * p->blue, 1.0, QuantumScale * q->blue, 1.0); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = QuantumRange * Divide(QuantumScale * p->index, 1.0, QuantumScale * 
q->index, 1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { return (Sca * Da + Dca * Sa - 2.0 * Sca * Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeExclusion(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { MagickRealType gamma, Sa, Da; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * Exclusion(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * Exclusion(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * Exclusion(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Exclusion(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Exclusion(Sa, 1.0, Da, 1.0)); if ((channel & RedChannel) != 0) composite->red = QuantumRange * Exclusion(QuantumScale * p->red, 1.0, QuantumScale * q->red, 1.0); if ((channel & GreenChannel) != 0) composite->green = QuantumRange * Exclusion(QuantumScale * p->green, 1.0, QuantumScale * q->green, 1.0); if ((channel & BlueChannel) != 0) composite->blue = QuantumRange * Exclusion(QuantumScale * p->blue, 1.0, QuantumScale * q->blue, 1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index = QuantumRange * Exclusion(QuantumScale * p->index, 1.0, QuantumScale * 
q->index, 1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { if ((2.0 * Sca) < Sa) return (2.0 * Sca * Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Sa * Da - 2.0 * (Da - Dca) * (Sa - Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeHardLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * HardLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * HardLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * HardLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * HardLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca + Dca) < QuantumRange) return (0.0); else return (1.0); } static inline void CompositeHardMix(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * HardMix(p->red * Sa, q->red * Da); composite->green = gamma * HardMix(p->green * Sa, q->green * Da); composite->blue = gamma * HardMix(p->blue * Sa, q->blue * Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * HardMix(p->index * Sa, q->index * Da); } static void HCLComposite(const double hue, const double chroma, const double luma, MagickRealType * red, MagickRealType * green, MagickRealType * blue) { double b, c, g, h, m, r, x; /* * Convert HCL to RGB colorspace. */ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h = 6.0 * hue; c = chroma; x = c * (1.0 - fabs(fmod(h, 2.0) - 1.0)); r = 0.0; g = 0.0; b = 0.0; if ((0.0 <= h) && (h < 1.0)) { r = c; g = x; } else if ((1.0 <= h) && (h < 2.0)) { r = x; g = c; } else if ((2.0 <= h) && (h < 3.0)) { g = c; b = x; } else if ((3.0 <= h) && (h < 4.0)) { g = x; b = c; } else if ((4.0 <= h) && (h < 5.0)) { r = x; b = c; } else if ((5.0 <= h) && (h < 6.0)) { r = c; b = x; } m = luma - (0.298839 * r + 0.586811 * g + 0.114350 * b); *red = QuantumRange * (r + m); *green = QuantumRange * (g + m); *blue = QuantumRange * (b + m); } static void CompositeHCL(const MagickRealType red, const MagickRealType green, const MagickRealType blue, double *hue, double *chroma, double *luma) { double b, c, g, h, max, r; /* * Convert RGB to HCL colorspace. 
*/ assert(hue != (double *)NULL); assert(chroma != (double *)NULL); assert(luma != (double *)NULL); r = (double)red; g = (double)green; b = (double)blue; max = MagickMax(r, MagickMax(g, b)); c = max - (double)MagickMin(r, MagickMin(g, b)); h = 0.0; if (c == 0) h = 0.0; else if (red == (MagickRealType) max) h = fmod((g - b) / c + 6.0, 6.0); else if (green == (MagickRealType) max) h = ((b - r) / c) + 2.0; else if (blue == (MagickRealType) max) h = ((r - g) / c) + 4.0; *hue = (h / 6.0); *chroma = QuantumScale * c; *luma = QuantumScale * (0.298839 * r + 0.586811 * g + 0.114350 * b); } static inline MagickRealType In(const MagickRealType p, const MagickRealType Sa, const MagickRealType magick_unused(q), const MagickRealType Da) { magick_unreferenced(q); return (Sa * p * Da); } static inline void CompositeIn(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { double gamma; MagickRealType Sa, Da; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = Sa * Da; composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * In(p->red, Sa, q->red, Da); composite->green = gamma * In(p->green, Sa, q->green, Da); composite->blue = gamma * In(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * In(p->index, Sa, q->index, Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha, const MagickRealType q, const MagickRealType beta) { if (p > q) return (MagickOver_(p, alpha, q, beta)); /* src-over */ return (MagickOver_(q, beta, p, alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { /* * Lighten is also equvalent to a 'Maximum' method OR a greyscale version * of a binary 'And' OR the 'Union' of pixel sets. 
*/ double gamma; if ((channel & SyncChannels) != 0) { composite->opacity = QuantumScale * p->opacity * q->opacity; /* Over Blend */ gamma = 1.0 - QuantumScale * composite->opacity; gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Lighten(p->red, p->opacity, q->red, q->opacity); composite->green = gamma * Lighten(p->green, p->opacity, q->green, q->opacity); composite->blue = gamma * Lighten(p->blue, p->opacity, q->blue, q->opacity); if (q->colorspace == CMYKColorspace) composite->index = gamma * Lighten(p->index, p->opacity, q->index, q->opacity); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = MagickMin(p->opacity, q->opacity); if ((channel & RedChannel) != 0) composite->red = MagickMax(p->red, q->red); if ((channel & GreenChannel) != 0) composite->green = MagickMax(p->green, q->green); if ((channel & BlueChannel) != 0) composite->blue = MagickMax(p->blue, q->blue); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = MagickMax(p->index, q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { /* * Select the pixel based on the intensity level. If 'Sync' flag select * whole pixel based on alpha weighted intensity. Otherwise use Intenisty * only, but restrict copy according to channel. */ if ((channel & SyncChannels) != 0) { MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; Da = 1.0 - QuantumScale * q->opacity; *composite = (Sa * MagickPixelIntensity(p) > Da * MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ((channel & AlphaChannel) != 0) composite->opacity = from_p ? p->opacity : q->opacity; if ((channel & RedChannel) != 0) composite->red = from_p ? p->red : q->red; if ((channel & GreenChannel) != 0) composite->green = from_p ? 
p->green : q->green; if ((channel & BlueChannel) != 0) composite->blue = from_p ? p->blue : q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = * Sca + Dca */ return (Sca + Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * (p->red * Sa + q->red * Da); composite->green = gamma * (p->green * Sa + q->green * Da); composite->blue = gamma * (p->blue * Sa + q->blue * Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * (p->index * Sa + q->index * Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * LinearBurn: as defined by Abode Photoshop, according to * http://www.simplefilter.de/en/basics/mixmods.html is: * * f(Sc,Dc) = Sc + Dc - 1 */ return (Sca + Dca - Sa * Da); } static inline void CompositeLinearBurn(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / 
(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * LinearBurn(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * LinearBurn(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * LinearBurn(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * LinearBurn(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static inline MagickRealType LinearLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { #if 0 /* * Previous formula, was only valid for fully-opaque images. */ return (Dca + 2 * Sca - 1.0); #else /* * LinearLight: as defined by Abode Photoshop, according to * http://www.simplefilter.de/en/basics/mixmods.html is: * * f(Sc,Dc) = Dc + 2*Sc - 1 */ return ((Sca - Sa) * Da + Sca + Dca); #endif } static inline void CompositeLinearLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * LinearLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * LinearLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * LinearLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * LinearLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static inline MagickRealType Mathematics(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da, const GeometryInfo * geometry_info) { /* * 'Mathematics' a free form user control mathematical composition is * defined as... * * f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D * * Where the arguments A,B,C,D are (currently) passed to composite as a * command separated 'geometry' string in "compose:args" image artifact. * * A = a->rho, B = a->sigma, C = a->xi, D = a->psi * * Applying the SVG transparency formula (see above), we get... * * Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) * * Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + * Dca*(1.0-Sa) */ return (geometry_info->rho * Sca * Dca + geometry_info->sigma * Sca * Da + geometry_info->xi * Dca * Sa + geometry_info->psi * Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeMathematics(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, const GeometryInfo * args, MagickPixelPacket * composite) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* ??? - AT */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * Mathematics(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da, args); composite->green = gamma * Mathematics(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da, args); composite->blue = gamma * Mathematics(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da, args); if (q->colorspace == CMYKColorspace) composite->index = gamma * Mathematics(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da, args); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Mathematics(Sa, 1.0, Da, 1.0, args)); if ((channel & RedChannel) != 0) composite->red = QuantumRange * Mathematics(QuantumScale * p->red, 1.0, QuantumScale * q->red, 1.0, args); if ((channel & GreenChannel) != 0) composite->green = QuantumRange * Mathematics(QuantumScale * p->green, 1.0, QuantumScale * q->green, 1.0, args); if ((channel & BlueChannel) != 0) composite->blue = QuantumRange * Mathematics(QuantumScale * p->blue, 1.0, QuantumScale * q->blue, 1.0, args); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = QuantumRange * Mathematics(QuantumScale * p->index, 1.0, QuantumScale * q->index, 1.0, args); } } static inline void CompositePlus(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { if ((channel & SyncChannels) != 0) { /* * NOTE: "Plus" does not use 'over' alpha-blending but uses a special * 'plus' form of alph-blending. It is the ONLY mathematical operator * to do this. this is what makes it different to the otherwise * equivalent "LinearDodge" composition method. * * Note however that color channels are still effected by the alpha * channel as a result of the blending, making it just as useless for * independant channel maths, just like all other mathematical * composition methods. 
* * As such the removal of the 'sync' flag, is still a usful convention. * * The MagickPixelCompositePlus() function is defined in * "composite-private.h" so it can also be used for Image Blending. */ MagickPixelCompositePlus(p, p->opacity, q, q->opacity, composite); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = p->opacity + q->opacity - QuantumRange; if ((channel & RedChannel) != 0) composite->red = p->red + q->red; if ((channel & GreenChannel) != 0) composite->green = p->green + q->green; if ((channel & BlueChannel) != 0) composite->blue = p->blue + q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = p->index + q->index; } } static inline MagickRealType Minus(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType magick_unused(Da)) { /* * Minus Source from Destination * * f(Sc,Dc) = Sc - Dc * */ magick_unreferenced(Da); return (Sca + Dca - 2 * Dca * Sa); } static inline void CompositeMinus(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Minus(p->red * Sa, Sa, q->red * Da, Da); composite->green = gamma * Minus(p->green * Sa, Sa, q->green * Da, Da); composite->blue = gamma * Minus(p->blue * Sa, Sa, q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Minus(p->index * Sa, Sa, q->index * Da, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = 
QuantumRange * (1.0 - (Sa - Da)); if ((channel & RedChannel) != 0) composite->red = p->red - q->red; if ((channel & GreenChannel) != 0) composite->green = p->green - q->green; if ((channel & BlueChannel) != 0) composite->blue = p->blue - q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = p->index - q->index; } } static inline MagickRealType ModulusAdd(const MagickRealType p, const MagickRealType Sa, const MagickRealType q, const MagickRealType Da) { MagickRealType pixel; pixel = p + q; while (pixel > QuantumRange) pixel -= QuantumRange; while (pixel < 0.0) pixel += QuantumRange; return (pixel * Sa * Da + p * Sa * (1.0 - Da) + q * Da * (1.0 - Sa)); } static inline void CompositeModulusAdd(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { if ((channel & SyncChannels) != 0) { double gamma; MagickRealType Sa, Da; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = ModulusAdd(p->red, Sa, q->red, Da); composite->green = ModulusAdd(p->green, Sa, q->green, Da); composite->blue = ModulusAdd(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = ModulusAdd(p->index, Sa, q->index, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange - ModulusAdd(QuantumRange - p->opacity, 1.0, QuantumRange - q->opacity, 1.0); if ((channel & RedChannel) != 0) composite->red = ModulusAdd(p->red, 1.0, q->red, 1.0); if ((channel & GreenChannel) != 0) composite->green = ModulusAdd(p->green, 1.0, q->green, 1.0); if ((channel & BlueChannel) != 0) composite->blue = ModulusAdd(p->blue, 1.0, q->blue, 1.0); if ((channel & 
IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = ModulusAdd(p->index, 1.0, q->index, 1.0); } } static inline MagickRealType ModulusSubtract(const MagickRealType p, const MagickRealType Sa, const MagickRealType q, const MagickRealType Da) { MagickRealType pixel; pixel = p - q; while (pixel > QuantumRange) pixel -= QuantumRange; while (pixel < 0.0) pixel += QuantumRange; return (pixel * Sa * Da + p * Sa * (1.0 - Da) + q * Da * (1.0 - Sa)); } static inline void CompositeModulusSubtract(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { if ((channel & SyncChannels) != 0) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = ModulusSubtract(p->red, Sa, q->red, Da); composite->green = ModulusSubtract(p->green, Sa, q->green, Da); composite->blue = ModulusSubtract(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = ModulusSubtract(p->index, Sa, q->index, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange - ModulusSubtract(QuantumRange - p->opacity, 1.0, QuantumRange - q->opacity, 1.0); if ((channel & RedChannel) != 0) composite->red = ModulusSubtract(p->red, 1.0, q->red, 1.0); if ((channel & GreenChannel) != 0) composite->green = ModulusSubtract(p->green, 1.0, q->green, 1.0); if ((channel & BlueChannel) != 0) composite->blue = ModulusSubtract(p->blue, 1.0, q->blue, 1.0); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = ModulusSubtract(p->index, 1.0, q->index, 1.0); } } static inline MagickRealType Multiply(const MagickRealType Sca, const MagickRealType Sa, const 
MagickRealType Dca, const MagickRealType Da) { return (Sca * Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeMultiply(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * Multiply(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * Multiply(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * Multiply(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Multiply(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Sa * Da); if ((channel & RedChannel) != 0) composite->red = QuantumScale * p->red * q->red; if ((channel & GreenChannel) != 0) composite->green = QuantumScale * p->green * q->green; if ((channel & BlueChannel) != 0) composite->blue = QuantumScale * p->blue * q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = QuantumScale * p->index * q->index; } } static inline MagickRealType Out(const MagickRealType p, const MagickRealType Sa, const MagickRealType magick_unused(q), const MagickRealType Da) { magick_unreferenced(q); return (Sa * p * (1.0 - Da)); } static inline void CompositeOut(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { double gamma; 
MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = Sa * (1.0 - Da); composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Out(p->red, Sa, q->red, Da); composite->green = gamma * Out(p->green, Sa, q->green, Da); composite->blue = gamma * Out(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Out(p->index, Sa, q->index, Da); } static MagickRealType PegtopLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * PegTop: A Soft-Light alternative: A continuous version of the * Softlight function, producing very similar results. * * f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc * * See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs(Da) < MagickEpsilon) return (Sca); return (Dca * Dca * (Sa - 2.0 * Sca) / Da + Sca * (2.0 * Dca + 1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositePegtopLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * PegtopLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * PegtopLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * PegtopLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * PegtopLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static MagickRealType PinLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * PinLight: A Photoshop 7 composition method * http://www.simplefilter.de/en/basics/mixmods.html * * f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if (Dca * Sa < Da * (2 * Sca - Sa)) return (Sca * (Da + 1.0) - Sa * Da + Dca * (1.0 - Sa)); if ((Dca * Sa) > (2 * Sca * Da)) return (Sca * Da + Sca + Dca * (1.0 - Sa)); return (Sca * (1.0 - Da) + Dca); } static inline void CompositePinLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * PinLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * PinLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * PinLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * PinLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static inline MagickRealType Screen(const MagickRealType Sca, const MagickRealType Dca) { /* * Screen: A negated multiply f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ return (Sca + Dca - Sca * Dca); } static inline void CompositeScreen(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { double gamma; MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); Sa *= (MagickRealType) QuantumScale; Da *= (MagickRealType) QuantumScale; /* optimization */ gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red = gamma * Screen(p->red * Sa, q->red * Da); composite->green = gamma * Screen(p->green * Sa, q->green * Da); composite->blue = gamma * Screen(p->blue * Sa, q->blue * Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Screen(p->index * Sa, q->index * Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Screen(Sa, Da)); if ((channel & RedChannel) != 0) composite->red = QuantumRange * Screen(QuantumScale * p->red, QuantumScale * q->red); if ((channel & GreenChannel) != 0) composite->green = QuantumRange * Screen(QuantumScale * p->green, QuantumScale * q->green); if ((channel & BlueChannel) != 0) composite->blue = QuantumRange * Screen(QuantumScale * p->blue, QuantumScale * q->blue); if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = QuantumRange * Screen(QuantumScale * p->index, QuantumScale * q->index); } } static MagickRealType SoftLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { #if 0 /* * Oct 2004 SVG specification -- was found to be incorrect See * http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html. */ if (2.0 * Sca < Sa) return (Dca * (Sa - (1.0 - Dca / Da) * (2.0 * Sca - Sa)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); if (8.0 * Dca <= Da) return (Dca * (Sa - (1.0 - Dca / Da) * (2.0 * Sca - Sa) * (3.0 - 8.0 * Dca / Da)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return ((Dca * Sa + (pow(Dca / Da, 0.5) * Da - Dca) * (2.0 * Sca - Sa)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #else MagickRealType alpha, beta; /* * New specification: March 2009 SVG specification. 
*/ alpha = Dca / Da; if ((2.0 * Sca) < Sa) return (Dca * (Sa + (2.0 * Sca - Sa) * (1.0 - alpha)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); if (((2.0 * Sca) > Sa) && ((4.0 * Dca) <= Da)) { beta = Dca * Sa + Da * (2.0 * Sca - Sa) * (4.0 * alpha * (4.0 * alpha + 1.0) * (alpha - 1.0) + 7.0 * alpha) + Sca * (1.0 - Da) + Dca * (1.0 - Sa); return (beta); } beta = Dca * Sa + Da * (2.0 * Sca - Sa) * (pow(alpha, 0.5) - alpha) + Sca * (1.0 - Da) + Dca * (1.0 - Sa); return (beta); #endif } static inline void CompositeSoftLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * SoftLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * SoftLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * SoftLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * SoftLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } /* * Deprecated Multiply difference by amount, if differance larger than * threshold??? 
What use this is is completely unknown The Opacity * calculation appears to be inverted -- Anthony Thyssen */ static inline MagickRealType Threshold(const MagickRealType p, const MagickRealType q, const MagickRealType threshold, const MagickRealType amount) { MagickRealType delta; delta = p - q; if ((MagickRealType) fabs((double)(2.0 * delta)) < threshold) return (q); return (q + delta * amount); } static inline void CompositeThreshold(const MagickPixelPacket * p, const MagickPixelPacket * q, const MagickRealType threshold, const MagickRealType amount, MagickPixelPacket * composite) { composite->red = Threshold(p->red, q->red, threshold, amount); composite->green = Threshold(p->green, q->green, threshold, amount); composite->blue = Threshold(p->blue, q->blue, threshold, amount); composite->opacity = QuantumRange - Threshold(p->opacity, q->opacity, threshold, amount); if (q->colorspace == CMYKColorspace) composite->index = Threshold(p->index, q->index, threshold, amount); } static MagickRealType VividLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * VividLight: A Photoshop 7 composition method. See * http://www.simplefilter.de/en/basics/mixmods.html. * * f(Sc,Dc) = (2*Sc < 1) ? 
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca - Sa) < MagickEpsilon)) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); if ((2 * Sca) <= Sa) return (Sa * (Da + Sa * (Dca - Da) / (2.0 * Sca)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Dca * Sa * Sa / (2.0 * (Sa - Sca)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeVividLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * VividLight(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * VividLight(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * VividLight(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * VividLight(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static MagickRealType Xor(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { return (Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeXor(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = Sa + Da - 2 * Sa * Da; /* Xor blend mode X=0,Y=1,Z=1 */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Xor(p->red * Sa, Sa, q->red * Da, Da); 
composite->green = gamma * Xor(p->green * Sa, Sa, q->green * Da, Da); composite->blue = gamma * Xor(p->blue * Sa, Sa, q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Xor(p->index * Sa, Sa, q->index * Da, Da); } MagickExport MagickBooleanType CompositeImage(Image * image, const CompositeOperator compose, const Image * source_image, const ssize_t x_offset, const ssize_t y_offset) { MagickBooleanType status; status = CompositeImageChannel(image, DefaultChannels, compose, source_image, x_offset, y_offset); return (status); } MagickExport MagickBooleanType CompositeImageChannel(Image * image, const ChannelType channel, const CompositeOperator compose, const Image * composite, const ssize_t x_offset, const ssize_t y_offset) { #define CompositeImageTag "Composite/Image" CacheView * source_view, *image_view; const char *value; ExceptionInfo * exception; GeometryInfo geometry_info; Image * canvas_image, *source_image; MagickBooleanType clamp, clip_to_self, status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; /* * Prepare composite image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); exception = (&image->exception); source_image = CloneImage(composite, 0, 0, MagickTrue, exception); if (source_image == (const Image *)NULL) return (MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void)SetImageColorspace(image, sRGBColorspace); (void)SetImageColorspace(source_image, image->colorspace); GetMagickPixelPacket(image, &zero); canvas_image = (Image *) NULL; amount = 0.5; canvas_dissolve = 1.0; clip_to_self = MagickTrue; percent_luma = 100.0; percent_chroma = 100.0; source_dissolve = 1.0; threshold = 0.05 f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* * Modify canvas outside the overlaid region. 
*/ clip_to_self = MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset + (ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset + (ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status = MagickTrue; source_view = AcquireVirtualCacheView(source_image, exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket * source_indexes; register const PixelPacket * p; register IndexPacket * indexes; register PixelPacket * q; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); q = GetCacheViewAuthenticPixels(image_view, x_offset, y + y_offset, source_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (q == (PixelPacket *) NULL)) { status = MagickFalse; continue; } source_indexes = GetCacheViewVirtualIndexQueue(source_view); indexes = GetCacheViewAuthenticIndexQueue(image_view); (void)CopyMagickMemory(q, p, source_image->columns * sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *)NULL)) (void)CopyMagickMemory(indexes, source_indexes, source_image->columns * sizeof(*indexes)); sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, CompositeImageTag, (MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); source_image = DestroyImage(source_image); return (status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* * Modify 
canvas outside the overlaid region and require an alpha * channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); clip_to_self = MagickFalse; break; } case BlurCompositeOp: { CacheView * canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter * resample_filter; SegmentInfo blur; /* * Blur Image by resampling. * * Blur Image dictated by an overlay gradient map: X = red_channel; * Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image = DestroyImage(source_image); return (MagickFalse); } /* * Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags = NoValue; value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) flags = ParseGeometry(value, &geometry_info); if ((flags & WidthValue) == 0) { (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning, "InvalidGeometry", "'%s' '%s'", "compose:args", value); source_image = DestroyImage(source_image); canvas_image = DestroyImage(canvas_image); return (MagickFalse); } /* * Users input sigma now needs to be converted to the EWA ellipse * size. The filter defaults to a sigma of 0.5 so to make this * match the users input the ellipse size needs to be doubled. 
*/ width = height = geometry_info.rho * 2.0; if ((flags & HeightValue) != 0) height = geometry_info.sigma * 2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1 = width; blur.x2 = 0.0; blur.y1 = 0.0; blur.y2 = height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0) { MagickRealType angle; angle = DegreesToRadians(geometry_info.xi); blur.x1 = width * cos(angle); blur.x2 = width * sin(angle); blur.y1 = (-height * sin(angle)); blur.y2 = height * cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start = 0.0; angle_range = 0.0; if ((flags & YValue) != 0) { angle_start = DegreesToRadians(geometry_info.xi); angle_range = DegreesToRadians(geometry_info.psi) - angle_start; } /* * Set up a gaussian cylindrical filter for EWA Bluring. * * As the minimum ellipse radius of support*1.0 the EWA algorithm * can only produce a minimum blur of 0.5 for Gaussian * (support=2.0) This means that even 'No Blur' will be still a * little blurry! * * The solution (as well as the problem of preventing any user * expert filter settings, is to set our own user settings, then * restore them afterwards. 
*/ resample_filter = AcquireResampleFilter(image, exception); SetResampleFilter(resample_filter, GaussianFilter, 1.0); /* do the variable blurring of each pixel in image */ pixel = zero; source_view = AcquireVirtualCacheView(source_image, exception); canvas_view = AcquireAuthenticCacheView(canvas_image, exception); for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket * magick_restrict p; register PixelPacket * magick_restrict r; register IndexPacket * magick_restrict canvas_indexes; register ssize_t x; if (((y + y_offset) < 0) || ((y + y_offset) >= (ssize_t) image->rows)) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); r = QueueCacheViewAuthenticPixels(canvas_view, 0, y, canvas_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes = GetCacheViewAuthenticIndexQueue(canvas_view); for (x = 0; x < (ssize_t) source_image->columns; x++) { if (((x_offset + x) < 0) || ((x_offset + x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle = angle_start + angle_range * QuantumScale * GetPixelBlue(p); blur.x1 = width * cos(angle); blur.x2 = width * sin(angle); blur.y1 = (-height * sin(angle)); blur.y2 = height * cos(angle); } #if 0 if (x == 10 && y == 60) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale * GetPixelRed(p), QuantumScale * GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1 * QuantumScale * GetPixelRed(p), blur.y1 * QuantumScale * GetPixelGreen(p), blur.x2 * QuantumScale * GetPixelRed(p), blur.y2 * QuantumScale * GetPixelGreen(p)); (void)ResamplePixelColor(resample_filter, (double)x_offset + x, (double) y_offset + y, &pixel); SetPixelPacket(canvas_image, &pixel, r, canvas_indexes + x); p++; r++; } sync = 
SyncCacheViewAuthenticPixels(canvas_view, exception); if (sync == MagickFalse) break; } resample_filter = DestroyResampleFilter(resample_filter); source_view = DestroyCacheView(source_view); canvas_view = DestroyCacheView(canvas_view); source_image = DestroyImage(source_image); source_image = canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView * canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket * magick_restrict canvas_indexes; register PixelPacket * magick_restrict r; /* * Displace/Distort based on overlay gradient map: X = * red_channel; Y = green_channel; compose:args = * x_scale[,y_scale[,center.x,center.y]] */ canvas_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image = DestroyImage(source_image); return (MagickFalse); } SetGeometryInfo(&geometry_info); flags = NoValue; value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) flags = ParseGeometry(value, &geometry_info); if ((flags & (WidthValue | HeightValue)) == 0) { if ((flags & AspectValue) == 0) { horizontal_scale = (MagickRealType) (source_image->columns - 1) / 2.0; vertical_scale = (MagickRealType) (source_image->rows - 1) / 2.0; } else { horizontal_scale = (MagickRealType) (image->columns - 1) / 2.0; vertical_scale = (MagickRealType) (image->rows - 1) / 2.0; } } else { horizontal_scale = geometry_info.rho; vertical_scale = geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale *= (source_image->columns - 1) / 200.0; vertical_scale *= (source_image->rows - 1) / 200.0; } else { horizontal_scale *= (image->columns - 1) / 200.0; vertical_scale *= (image->rows - 1) / 200.0; } } if ((flags & HeightValue) == 0) vertical_scale = horizontal_scale; } /* * Determine fixed center point for absolute distortion map * Absolute 
distort == Displace offset relative to a fixed * absolute point Select that point according to +X+Y user * inputs. default = center of overlay image arg flag '!' = * locations/percentage relative to background image */ center.x = (MagickRealType) x_offset; center.y = (MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x = ((MagickRealType) image->columns - 1) / 2.0; else center.x = (MagickRealType) (x_offset + (source_image->columns - 1) / 2.0); else if ((flags & AspectValue) == 0) center.x = (MagickRealType) (x_offset + geometry_info.xi); else center.x = geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y = ((MagickRealType) image->rows - 1) / 2.0; else center.y = (MagickRealType) (y_offset + (source_image->rows - 1) / 2.0); else if ((flags & AspectValue) != 0) center.y = geometry_info.psi; else center.y = (MagickRealType) (y_offset + geometry_info.psi); } /* * Shift the pixel offset point as defined by the provided, * displacement/distortion map. -- Like a lens... 
*/ pixel = zero; image_view = AcquireVirtualCacheView(image, exception); source_view = AcquireVirtualCacheView(source_image, exception); canvas_view = AcquireAuthenticCacheView(canvas_image, exception); for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket * magick_restrict p; register ssize_t x; if (((y + y_offset) < 0) || ((y + y_offset) >= (ssize_t) image->rows)) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); r = QueueCacheViewAuthenticPixels(canvas_view, 0, y, canvas_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes = GetCacheViewAuthenticIndexQueue(canvas_view); for (x = 0; x < (ssize_t) source_image->columns; x++) { if (((x_offset + x) < 0) || ((x_offset + x) >= (ssize_t) image->columns)) { p++; continue; } /* * Displace the offset. */ offset.x = (double)((horizontal_scale * (GetPixelRed(p) - (((MagickRealType) QuantumRange + 1.0) / 2.0))) / (((MagickRealType) QuantumRange + 1.0) / 2.0) + center.x + ((compose == DisplaceCompositeOp) ? x : 0)); offset.y = (double)((vertical_scale * (GetPixelGreen(p) - (((MagickRealType) QuantumRange + 1.0) / 2.0))) / (((MagickRealType) QuantumRange + 1.0) / 2.0) + center.y + ((compose == DisplaceCompositeOp) ? y : 0)); (void)InterpolateMagickPixelPacket(image, image_view, UndefinedInterpolatePixel, (double)offset.x, (double)offset.y, &pixel, exception); /* * Mask with the 'invalid pixel mask' in alpha channel. 
*/ pixel.opacity = (MagickRealType) QuantumRange *(1.0 - (1.0 - QuantumScale * pixel.opacity) * (1.0 - QuantumScale * GetPixelOpacity(p))); SetPixelPacket(canvas_image, &pixel, r, canvas_indexes + x); p++; r++; } sync = SyncCacheViewAuthenticPixels(canvas_view, exception); if (sync == MagickFalse) break; } canvas_view = DestroyCacheView(canvas_view); source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); source_image = DestroyImage(source_image); source_image = canvas_image; break; } case DissolveCompositeOp: { /* * Geometry arguments to dissolve factors. */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); source_dissolve = geometry_info.rho / 100.0; canvas_dissolve = 1.0; if ((source_dissolve - MagickEpsilon) < 0.0) source_dissolve = 0.0; if ((source_dissolve + MagickEpsilon) > 1.0) { canvas_dissolve = 2.0 - source_dissolve; source_dissolve = 1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve = geometry_info.sigma / 100.0; if ((canvas_dissolve - MagickEpsilon) < 0.0) canvas_dissolve = 0.0; clip_to_self = MagickFalse; if ((canvas_dissolve + MagickEpsilon) > 1.0) { canvas_dissolve = 1.0; clip_to_self = MagickTrue; } } break; } case BlendCompositeOp: { value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); source_dissolve = geometry_info.rho / 100.0; canvas_dissolve = 1.0 - source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve = geometry_info.sigma / 100.0; clip_to_self = MagickFalse; if ((canvas_dissolve + MagickEpsilon) > 1.0) clip_to_self = MagickTrue; } break; } case MathematicsCompositeOp: { /* * Just collect the values from "compose:args", setting. Unused * values are set to zero automagically. 
* * Arguments are normally a comma separated list, so this probably * should be changed to some 'general comma list' parser, (with a * minimum number of values) */ SetGeometryInfo(&geometry_info); value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) (void)ParseGeometry(value, &geometry_info); break; } case ModulateCompositeOp: { /* * Determine the luma and chroma scale. */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); percent_luma = geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma = geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* * Determine the amount and threshold. This Composition method is * deprecated */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); amount = geometry_info.rho; threshold = geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold = 0.05 f; } threshold *= QuantumRange; break; } default: break; } value = GetImageArtifact(image, "compose:outside-overlay"); if (value != (const char *)NULL) clip_to_self = IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp = MagickTrue; value = GetImageArtifact(image, "compose:clamp"); if (value != (const char *)NULL) clamp = IsMagickTrue(value); /* * Composite image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status = AccelerateCompositeImage(image, channel, compose, source_image, x_offset, y_offset, canvas_dissolve, source_dissolve, exception); if (status != MagickFalse) return (status); #endif status = MagickTrue; progress = 0; midpoint = ((MagickRealType) QuantumRange + 1.0) / 2; GetMagickPixelPacket(source_image, &zero); source_view = AcquireVirtualCacheView(source_image, exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { const PixelPacket * pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket * magick_restrict source_indexes; register const PixelPacket * magick_restrict p; register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y - y_offset) >= (ssize_t) source_image->rows) continue; } /* * If pixels is NULL, y is outside overlay region. 
*/ pixels = (PixelPacket *) NULL; p = (PixelPacket *) NULL; if ((y >= y_offset) && ((y - y_offset) < (ssize_t) source_image->rows)) { p = GetCacheViewVirtualPixels(source_view, 0, y - y_offset, source_image->columns, 1, exception); if (p == (const PixelPacket *)NULL) { status = MagickFalse; continue; } pixels = p; if (x_offset < 0) p -= x_offset; } q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); source_indexes = GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image, &source); GetMagickPixelPacket(image, &canvas); hue = 0.0; chroma = 0.0; luma = 0.0; for (x = 0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x - x_offset) >= (ssize_t) source_image->columns) break; } canvas.red = (MagickRealType) GetPixelRed(q); canvas.green = (MagickRealType) GetPixelGreen(q); canvas.blue = (MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity = (MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index = (MagickRealType) GetPixelIndex(indexes + x); if (image->colorspace == CMYKColorspace) { canvas.red = (MagickRealType) QuantumRange - canvas.red; canvas.green = (MagickRealType) QuantumRange - canvas.green; canvas.blue = (MagickRealType) QuantumRange - canvas.blue; canvas.index = (MagickRealType) QuantumRange - canvas.index; } /* * Handle canvas modifications outside overlaid region. 
*/ composite = canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x - x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity = (MagickRealType) (QuantumRange - canvas_dissolve * (QuantumRange - composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas, &composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity = (MagickRealType) TransparentOpacity; break; } default: { (void)GetOneVirtualMagickPixel(source_image, x - x_offset, y - y_offset, &composite, exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red = (MagickRealType) QuantumRange - composite.red; composite.green = (MagickRealType) QuantumRange - composite.green; composite.blue = (MagickRealType) QuantumRange - composite.blue; composite.index = (MagickRealType) QuantumRange - composite.index; } SetPixelRed(q, clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q, clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q, clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q, clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes + x, clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* * Handle normal overlay of source onto canvas. 
*/ source.red = (MagickRealType) GetPixelRed(p); source.green = (MagickRealType) GetPixelGreen(p); source.blue = (MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity = (MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index = (MagickRealType) GetPixelIndex(source_indexes + x - x_offset); if (source_image->colorspace == CMYKColorspace) { source.red = (MagickRealType) QuantumRange - source.red; source.green = (MagickRealType) QuantumRange - source.green; source.blue = (MagickRealType) QuantumRange - source.blue; source.index = (MagickRealType) QuantumRange - source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas, &composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite = source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source, source.opacity, &canvas, canvas.opacity, &composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas, canvas.opacity, &source, source.opacity, &composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source, &canvas, &composite); break; } case DstInCompositeOp: { CompositeIn(&canvas, &source, &composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source, &canvas, &composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas, &source, &composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source, &canvas, &composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas, &source, &composite); break; } case XorCompositeOp: { CompositeXor(&source, &canvas, &composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source, &canvas, channel, &composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source, &canvas, channel, 
&composite); break; } case MinusSrcCompositeOp: { CompositeMinus(&canvas, &source, channel, &composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source, &canvas, channel, &composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source, &canvas, channel, &composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source, &canvas, channel, &composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source, &canvas, channel, &composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source, &canvas, channel, &composite); break; } case ScreenCompositeOp: { CompositeScreen(&source, &canvas, channel, &composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source, &canvas, channel, &composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas, &source, channel, &composite); break; } case DarkenCompositeOp: { CompositeDarken(&source, &canvas, channel, &composite); break; } case LightenCompositeOp: { CompositeLighten(&source, &canvas, channel, &composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source, &canvas, channel, &composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source, &canvas, channel, &composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source, &canvas, channel, &geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source, &canvas, &composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source, &canvas, &composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source, &canvas, &composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source, &canvas, &composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source, &canvas, &composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source, &canvas, &composite); break; } case OverlayCompositeOp: { 
/* Overlay = Reversed HardLight. */ CompositeHardLight(&canvas, &source, &composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source, &canvas, &composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source, &canvas, &composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source, &canvas, &composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source, &canvas, &composite); break; } case PinLightCompositeOp: { CompositePinLight(&source, &canvas, &composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange / 2.0)) || (IsMagickColorSimilar(&source, &canvas) != MagickFalse)) composite.opacity = (MagickRealType) TransparentOpacity; else composite.opacity = (MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source, &canvas, &composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source, (MagickRealType) (QuantumRange - source_dissolve * (QuantumRange - source.opacity)), &canvas, (MagickRealType) (QuantumRange - canvas_dissolve * (QuantumRange - canvas.opacity)), &composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source, source_dissolve, &canvas, canvas_dissolve, &composite); break; } case StereoCompositeOp: { canvas.red = (MagickRealType) GetPixelRed(p); break; } case ThresholdCompositeOp: { CompositeThreshold(&source, &canvas, threshold, amount, &composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset = (ssize_t) (MagickPixelIntensityToQuantum(&source) - midpoint); if (offset == 0) break; CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); luma += (0.01 * percent_luma * offset) / midpoint; chroma *= 0.01 * percent_chroma; HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); break; } case 
HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &hue, &sans, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &sans, &chroma, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &sans, &sans, &luma); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &sans, &sans, &luma); CompositeHCL(source.red, source.green, source.blue, &hue, &chroma, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red = source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { 
composite.green = source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue = source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity = (MagickRealType) (QuantumRange - MagickPixelIntensityToQuantum(&source)); else composite.opacity = source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index = QuantumRange - source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite = source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red = (MagickRealType) QuantumRange - composite.red; composite.green = (MagickRealType) QuantumRange - composite.green; composite.blue = (MagickRealType) QuantumRange - composite.blue; composite.index = (MagickRealType) QuantumRange - composite.index; } SetPixelRed(q, clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q, clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q, clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q, clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes + x, clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels + source_image->columns)) p = pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, CompositeImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); if (canvas_image != (Image *) NULL) canvas_image = DestroyImage(canvas_image); else source_image = DestroyImage(source_image); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T e x t u r e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TextureImage() repeatedly tiles the texture image across and * down the image % canvas. % % The format of the TextureImage method is: % * % MagickBooleanType TextureImage(Image *image,const Image *texture) % * % A description of each parameter follows: % % o image: the image. % % * o texture: This image is the texture to layer on the background. 
% */
MagickExport MagickBooleanType TextureImage(Image *image, const Image *texture)
{
  /*
    Repeatedly tile the texture image across and down the image canvas.
    Returns MagickTrue on success; MagickFalse if there is no texture, the
    canvas cannot be made DirectClass, the texture cannot be cloned, or a
    pixel transfer fails.
  */
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *)NULL)
    return (MagickFalse);
  /* Canvas must be DirectClass so pixel rows can be written in place. */
  if (SetImageStorageClass(image, DirectClass) == MagickFalse)
    return (MagickFalse);
  exception = (&image->exception);
  /* Work on a private clone so the caller's texture image is untouched. */
  texture_image = CloneImage(texture, 0, 0, MagickTrue, exception);
  if (texture_image == (const Image *)NULL)
    return (MagickFalse);
  (void)TransformImageColorspace(texture_image, image->colorspace);
  /* Tile virtual-pixel method makes out-of-bounds reads wrap the texture. */
  (void)SetImageVirtualPixelMethod(texture_image, TileVirtualPixelMethod);
  status = MagickTrue;
  /* Slow path: any compose method that cannot be done as a raw row copy
     (anything but Copy, or Over when either image has an alpha channel). */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background, one composite per tile.
      */
      for (y = 0; y < (ssize_t) image->rows; y += (ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;  /* an earlier tile failed; skip remaining work */
        for (x = 0; x < (ssize_t) image->columns;
             x += (ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Composite one tile, honoring the texture's tile offset. */
          thread_status = CompositeImage(image, image->compose, texture_image,
            x + texture_image->tile_offset.x, y + texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status = thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed = SetImageProgress(image, TextureImageTag,
              (MagickOffsetType) y, image->rows);
            if (proceed == MagickFalse)
              status = MagickFalse;
          }
      }
      (void)SetImageProgress(image, TextureImageTag, (MagickOffsetType)
        image->rows, image->rows);
      texture_image = DestroyImage(texture_image);
      return (status);
    }
  /*
    Fast path: tile texture onto the image background (optimized) by copying
    raw pixel rows directly from the texture into the canvas.
  */
  status = MagickTrue;
  texture_view = AcquireVirtualCacheView(texture_image, exception);
  image_view = AcquireAuthenticCacheView(image, exception);
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /* Read one texture row (row index wrapped by the tile offset) and queue
       the matching destination row for writing. */
    p = GetCacheViewVirtualPixels(texture_view, texture_image->tile_offset.x,
      (y + texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns, 1, exception);
    q = QueueCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *)NULL) || (q == (PixelPacket *) NULL))
      {
        status = MagickFalse;
        continue;
      }
    texture_indexes = GetCacheViewVirtualIndexQueue(texture_view);
    indexes = GetCacheViewAuthenticIndexQueue(image_view);
    for (x = 0; x < (ssize_t) image->columns;
         x += (ssize_t) texture_image->columns)
    {
      width = texture_image->columns;
      if ((x + (ssize_t) width) > (ssize_t) image->columns)
        width = image->columns - x;  /* clip the final partial tile */
      (void)CopyMagickMemory(q, p, width * sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          /* CMYK images carry a separate black-channel index plane. */
          (void)CopyMagickMemory(indexes, texture_indexes, width *
            sizeof(*indexes));
          indexes += width;
        }
      q += width;
    }
    sync = SyncCacheViewAuthenticPixels(image_view, exception);
    if (sync == MagickFalse)
      status = MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed = SetImageProgress(image, TextureImageTag, (MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status = MagickFalse;
      }
  }
  texture_view = DestroyCacheView(texture_view);
  image_view = DestroyCacheView(image_view);
  texture_image = DestroyImage(texture_image);
  return (status);
}
/* * Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p o s i t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CompositeImageChannel() returns the second image composited onto * the first % at the specified offset, using the specified composite * method. 
%
%  The format of the CompositeImageChannel method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const CompositeOperator compose,Image *source_image,
%        const ssize_t x_offset,const ssize_t y_offset)
%      MagickBooleanType CompositeImageChannel(Image *image,
%        const ChannelType channel,const CompositeOperator compose,
%        Image *source_image,const ssize_t x_offset,const ssize_t y_offset)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
%    o channel: the channel.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o source_image: the composite (source) image.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'source_image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o "compose:outside-overlay"
%        Modify how the composition is to affect areas not directly covered
%        by the 'source_image' at the offset given.  Normally this is
%        dependent on the 'compose' method, especially Duff-Porter methods.
%
%        If set to "false" then disable all normal handling of pixels not
%        covered by the source_image.  Typically used for repeated tiling
%        of the source_image by the calling API.
%
%        Previous to IM v6.5.3-3 this was called "modify-outside-overlay"
%
*/

/*
 * Programmers notes on SVG specification.
 *
 * A Composition is defined by...
 *    Color Function : f(Sc,Dc)  where Sc and Dc are the normalized colors
 *    Blending areas : X = 1     for area of overlap, ie: f(Sc,Dc)
 *                     Y = 1     for source preserved
 *                     Z = 1     for canvas preserved
 *
 * Conversion to transparency (then optimized)
 *    Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
 *    Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
 *
 * Where...
 *    Sca = Sc*Sa     normalized Source color divided by Source alpha
 *    Dca = Dc*Da     normalized Dest color divided by Dest alpha
 *    Dc' = Dca'/Da'  the desired color value for this channel.
 *
 * Da' appears in the following formula as 'gamma', the resulting alpha value.
 *
 * Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
 * the following optimizations...
 *    gamma = Sa+Da-Sa*Da;
 *    gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
 *    opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma
 *
 * The above SVG definitions also state that Mathematical Composition
 * methods should use an 'Over' blending mode for the Alpha Channel.
 * It however was not applied for composition modes of 'Plus', 'Minus',
 * the modulus versions of 'Add' and 'Subtract'.
 *
 * Mathematical operator changes to be applied from IM v6.7...
 *
 *  1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
 *     'ModulusAdd' and 'ModulusSubtract' for clarity.
 *
 *  2/ All mathematical compositions work as per the SVG specification
 *     with regard to blending.  This now includes 'ModulusAdd' and
 *     'ModulusSubtract'.
 *
 *  3/ When the special channel flag 'sync' (synchronize channel updates)
 *     is turned off (enabled by default) then mathematical compositions are
 *     only performed on the channels specified, and are applied
 *     independently of each other.  In other words the mathematics is
 *     performed as 'pure' mathematical operations, rather than as image
 *     operations.
*/ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa, const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return (p * Sa + q * (1.0 - Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ composite->opacity = q->opacity; /* optimized Da = 1.0-Gamma */ composite->red = Atop(p->red, Sa, q->red, 1.0); composite->green = Atop(p->green, Sa, q->green, 1.0); composite->blue = Atop(p->blue, Sa, q->blue, 1.0); if (q->colorspace == CMYKColorspace) composite->index = Atop(p->index, Sa, q->index, 1.0); } /* * What is this Composition method for? Can't find any specification! WARNING * this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType intensity; intensity = MagickPixelIntensity(p); composite->red = QuantumScale * intensity * q->red; composite->green = QuantumScale * intensity * q->green; composite->blue = QuantumScale * intensity * q->blue; composite->opacity = (MagickRealType) QuantumScale *intensity * p->opacity; if (q->colorspace == CMYKColorspace) composite->index = QuantumScale * intensity * q->index; } static inline void CompositeClear(const MagickPixelPacket * q, MagickPixelPacket * composite) { composite->opacity = (MagickRealType) TransparentOpacity; composite->red = 0.0; composite->green = 0.0; composite->blue = 0.0; if (q->colorspace == CMYKColorspace) composite->index = 0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { #if 0 /* * Oct 2004 SVG specification. 
*/ if (Sca * Da + Dca * Sa <= Sa * Da) return (Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Sa * (Sca * Da + Dca * Sa - Sa * Da) / Sca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #else /* * March 2009 SVG specification. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca - Da) < MagickEpsilon)) return (Sa * Da + Dca * (1.0 - Sa)); if (Sca < MagickEpsilon) return (Dca * (1.0 - Sa)); return (Sa * Da - Sa * MagickMin(Da, (Da - Dca) * Sa / Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #endif } static inline void CompositeColorBurn(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * ColorBurn(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * ColorBurn(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * ColorBurn(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * ColorBurn(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * Oct 2004 SVG specification. */ if ((Sca * Da + Dca * Sa) >= Sa * Da) return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); return (Dca * Sa * Sa / (Sa - Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); #if 0 /* * New specification, March 2009 SVG specification. This specification * was also wrong of non-overlap cases. 
  */
  if ((fabs(Sca - Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return (Sca * (1.0 - Da));
  if (fabs(Sca - Sa) < MagickEpsilon)
    return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  return (Sa * MagickMin(Da, Dca * Sa / (Sa - Sca)));
#endif
#if 0
  /*
    Working from first principles using the original formula:

       f(Sc,Dc) = Dc/(1-Sc)

    This works correctly!  Looks like the 2004 model was right but just
    required an extra condition for correct handling.
  */
  if ((fabs(Sca - Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return (Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  if (fabs(Sca - Sa) < MagickEpsilon)
    return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  return (Dca * Sa * Sa / (Sa - Sca) + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
#endif
}

/*
  CompositeColorDodge: apply the ColorDodge blend to each channel pair,
  'over'-blending the resulting alphas as per the SVG compositing spec.
*/
static inline void
CompositeColorDodge(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * ColorDodge(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * ColorDodge(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * ColorDodge(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * ColorDodge(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

/*
  Darken: select whichever channel value is darker, alpha-composited
  'over' the other (alpha here is opacity, i.e. inverted alpha).
*/
static inline MagickRealType
Darken(const MagickRealType p, const MagickRealType alpha,
  const MagickRealType q, const MagickRealType beta)
{
  if (p < q)
    return (MagickOver_(p, alpha, q, beta));  /* src-over */
  return (MagickOver_(q, beta, p, alpha));  /* dst-over */
}

static inline void
CompositeDarken(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  /*
    Darken is equivalent to a 'Minimum' method OR a greyscale version of a
    binary 'Or' OR the 'Intersection' of pixel sets.
  */
  double
    gamma;

  if ((channel & SyncChannels) != 0)
    {
      composite->opacity = QuantumScale * p->opacity * q->opacity;  /* Over Blend */
      gamma = 1.0 - QuantumScale * composite->opacity;
      gamma = PerceptibleReciprocal(gamma);
      composite->red = gamma * Darken(p->red, p->opacity, q->red, q->opacity);
      composite->green = gamma * Darken(p->green, p->opacity, q->green,
        q->opacity);
      composite->blue = gamma * Darken(p->blue, p->opacity, q->blue,
        q->opacity);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Darken(p->index, p->opacity, q->index,
          q->opacity);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = MagickMax(p->opacity, q->opacity);
      if ((channel & RedChannel) != 0)
        composite->red = MagickMin(p->red, q->red);
      if ((channel & GreenChannel) != 0)
        composite->green = MagickMin(p->green, q->green);
      if ((channel & BlueChannel) != 0)
        composite->blue = MagickMin(p->blue, q->blue);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = MagickMin(p->index, q->index);
    }
}

static inline void
CompositeDarkenIntensity(const MagickPixelPacket * p,
  const MagickPixelPacket * q, const ChannelType channel,
  MagickPixelPacket * composite)
{
  /*
    Select the pixel based on the intensity level.  If 'Sync' flag select
    whole pixel based on alpha weighted intensity.  Otherwise use intensity
    only, but restrict copy according to channel.
  */
  if ((channel & SyncChannels) != 0)
    {
      MagickRealType
        Da,
        Sa;

      Sa = 1.0 - QuantumScale * p->opacity;
      Da = 1.0 - QuantumScale * q->opacity;
      *composite = (Sa * MagickPixelIntensity(p) <
        Da * MagickPixelIntensity(q)) ? *p : *q;
    }
  else
    {
      int
        from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q));

      if ((channel & AlphaChannel) != 0)
        composite->opacity = from_p ? p->opacity : q->opacity;
      if ((channel & RedChannel) != 0)
        composite->red = from_p ? p->red : q->red;
      if ((channel & GreenChannel) != 0)
        composite->green = from_p ? p->green : q->green;
      if ((channel & BlueChannel) != 0)
        composite->blue = from_p ? p->blue : q->blue;
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = from_p ? p->index : q->index;
    }
}

/*
  Difference: absolute difference of the two channel values, alpha blended.
*/
static inline MagickRealType
Difference(const MagickRealType p, const MagickRealType Sa,
  const MagickRealType q, const MagickRealType Da)
{
  /* Optimized by multiplying by QuantumRange (taken from gamma). */
  return (Sa * p + Da * q - Sa * Da * 2.0 * MagickMin(p, q));
}

static inline void
CompositeDifference(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = PerceptibleReciprocal(gamma);
      /*
        Values are not normalized as an optimization.
      */
      composite->red = gamma * Difference(p->red, Sa, q->red, Da);
      composite->green = gamma * Difference(p->green, Sa, q->green, Da);
      composite->blue = gamma * Difference(p->blue, Sa, q->blue, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Difference(p->index, Sa, q->index, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange -
          fabs((double) (p->opacity - q->opacity));
      if ((channel & RedChannel) != 0)
        composite->red = fabs((double) (p->red - q->red));
      if ((channel & GreenChannel) != 0)
        composite->green = fabs((double) (p->green - q->green));
      if ((channel & BlueChannel) != 0)
        composite->blue = fabs((double) (p->blue - q->blue));
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = fabs((double) (p->index - q->index));
    }
}

static MagickRealType
Divide(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  /*
    Divide Source by Destination

       f(Sc,Dc) = Sc / Dc

    But with appropriate handling for special case of Dc == 0 specifically
    so that f(Black,Black)=Black and f(non-Black,Black)=White.  It is
    however also important to correctly do 'over' alpha blending which is
    why the formula becomes so complex.
  */
  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return (Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  if (fabs(Dca) < MagickEpsilon)
    return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  return (Sca * Da * Da / Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
}

static inline void
CompositeDivide(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon :
        gamma);
      composite->red = gamma * Divide(QuantumScale * p->red * Sa, Sa,
        QuantumScale * q->red * Da, Da);
      composite->green = gamma * Divide(QuantumScale * p->green * Sa, Sa,
        QuantumScale * q->green * Da, Da);
      composite->blue = gamma * Divide(QuantumScale * p->blue * Sa, Sa,
        QuantumScale * q->blue * Da, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Divide(QuantumScale * p->index * Sa, Sa,
          QuantumScale * q->index * Da, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange * (1.0 - Divide(Sa, 1.0, Da, 1.0));
      if ((channel & RedChannel) != 0)
        composite->red = QuantumRange * Divide(QuantumScale * p->red, 1.0,
          QuantumScale * q->red, 1.0);
      if ((channel & GreenChannel) != 0)
        composite->green = QuantumRange * Divide(QuantumScale * p->green, 1.0,
          QuantumScale * q->green, 1.0);
      if ((channel & BlueChannel) != 0)
        composite->blue = QuantumRange * Divide(QuantumScale * p->blue, 1.0,
          QuantumScale * q->blue, 1.0);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = QuantumRange * Divide(QuantumScale * p->index, 1.0,
          QuantumScale * q->index, 1.0);
    }
}

/*
  Exclusion: Sca*Da + Dca*Sa - 2*Sca*Dca, plus the SVG non-overlap terms.
*/
static MagickRealType
Exclusion(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  return (Sca * Da + Dca * Sa - 2.0 * Sca * Dca + Sca * (1.0 - Da) +
    Dca * (1.0 - Sa));
}

static inline void
CompositeExclusion(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  MagickRealType
    gamma,
    Sa,
    Da;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon :
        gamma);
      composite->red = gamma * Exclusion(QuantumScale * p->red * Sa, Sa,
        QuantumScale * q->red * Da, Da);
      composite->green = gamma * Exclusion(QuantumScale * p->green * Sa, Sa,
        QuantumScale * q->green * Da, Da);
      composite->blue = gamma * Exclusion(QuantumScale * p->blue * Sa, Sa,
        QuantumScale * q->blue * Da, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Exclusion(QuantumScale * p->index * Sa, Sa,
          QuantumScale * q->index * Da, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange * (1.0 - Exclusion(Sa, 1.0, Da, 1.0));
      if ((channel & RedChannel) != 0)
        composite->red = QuantumRange * Exclusion(QuantumScale * p->red, 1.0,
          QuantumScale * q->red, 1.0);
      if ((channel & GreenChannel) != 0)
        composite->green = QuantumRange * Exclusion(QuantumScale * p->green,
          1.0, QuantumScale * q->green, 1.0);
      if ((channel & BlueChannel) != 0)
        composite->blue = QuantumRange * Exclusion(QuantumScale * p->blue, 1.0,
          QuantumScale * q->blue, 1.0);
      if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
        composite->index = QuantumRange * Exclusion(QuantumScale * p->index,
          1.0, QuantumScale *
          q->index, 1.0);
    }
}

/*
  HardLight: Multiply or Screen depending on the source value, as per the
  SVG compositing specification.
*/
static MagickRealType
HardLight(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  if ((2.0 * Sca) < Sa)
    return (2.0 * Sca * Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  return (Sa * Da - 2.0 * (Da - Dca) * (Sa - Sca) + Sca * (1.0 - Da) +
    Dca * (1.0 - Sa));
}

static inline void
CompositeHardLight(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * HardLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * HardLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * HardLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * HardLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

/*
  HardMix: threshold the sum of the two channel values to 0.0 or 1.0.
*/
static MagickRealType
HardMix(const MagickRealType Sca, const MagickRealType Dca)
{
  if ((Sca + Dca) < QuantumRange)
    return (0.0);
  else
    return (1.0);
}

static inline void
CompositeHardMix(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * HardMix(p->red * Sa, q->red * Da);
  composite->green = gamma * HardMix(p->green * Sa, q->green * Da);
  composite->blue = gamma * HardMix(p->blue * Sa, q->blue * Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * HardMix(p->index * Sa, q->index * Da);
}

/*
  HCLComposite: convert a HCL (hue, chroma, luma) triplet back to
  quantum-range RGB.
*/
static void
HCLComposite(const double hue, const double chroma, const double luma,
  MagickRealType * red, MagickRealType * green, MagickRealType * blue)
{
  double
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h = 6.0 * hue;  /* hue sector in [0,6) */
  c = chroma;
  x = c * (1.0 - fabs(fmod(h, 2.0) - 1.0));
  r = 0.0;
  g = 0.0;
  b = 0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r = c;
      g = x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r = x;
      g = c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g = c;
      b = x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g = x;
      b = c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r = x;
      b = c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r = c;
      b = x;
    }
  /* add back the luma component (Rec 601 luma coefficients) */
  m = luma - (0.298839 * r + 0.586811 * g + 0.114350 * b);
  *red = QuantumRange * (r + m);
  *green = QuantumRange * (g + m);
  *blue = QuantumRange * (b + m);
}

/*
  CompositeHCL: convert a quantum-range RGB triplet to HCL
  (hue, chroma, luma), each normalized to [0,1].
*/
static void
CompositeHCL(const MagickRealType red, const MagickRealType green,
  const MagickRealType blue, double *hue, double *chroma, double *luma)
{
  double
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (double *) NULL);
  assert(chroma != (double *) NULL);
  assert(luma != (double *) NULL);
  r = (double) red;
  g = (double) green;
  b = (double) blue;
  max = MagickMax(r, MagickMax(g, b));
  c = max - (double) MagickMin(r, MagickMin(g, b));
  h = 0.0;
  if (c == 0)
    h = 0.0;
  else if (red == (MagickRealType) max)
    h = fmod((g - b) / c + 6.0, 6.0);
  else if (green == (MagickRealType) max)
    h = ((b - r) / c) + 2.0;
  else if (blue == (MagickRealType) max)
    h = ((r - g) / c) + 4.0;
  *hue = (h / 6.0);
  *chroma = QuantumScale * c;
  *luma = QuantumScale * (0.298839 * r + 0.586811 * g + 0.114350 * b);
}

/*
  In: the source visible only where the destination is opaque.
*/
static inline MagickRealType
In(const MagickRealType p, const MagickRealType Sa,
  const MagickRealType magick_unused(q), const MagickRealType Da)
{
  magick_unreferenced(q);

  return (Sa * p * Da);
}

static inline void
CompositeIn(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  double
    gamma;

  MagickRealType
    Sa,
    Da;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = Sa * Da;
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = PerceptibleReciprocal(gamma);
  composite->red = gamma * In(p->red, Sa, q->red, Da);
  composite->green = gamma * In(p->green, Sa, q->green, Da);
  composite->blue = gamma * In(p->blue, Sa, q->blue, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * In(p->index, Sa, q->index, Da);
}

/*
  Lighten: select whichever channel value is lighter, alpha-composited
  'over' the other (alpha here is opacity, i.e. inverted alpha).
*/
static inline MagickRealType
Lighten(const MagickRealType p, const MagickRealType alpha,
  const MagickRealType q, const MagickRealType beta)
{
  if (p > q)
    return (MagickOver_(p, alpha, q, beta));  /* src-over */
  return (MagickOver_(q, beta, p, alpha));  /* dst-over */
}

static inline void
CompositeLighten(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method OR a greyscale version
    of a binary 'And' OR the 'Union' of pixel sets.
  */
  double
    gamma;

  if ((channel & SyncChannels) != 0)
    {
      composite->opacity = QuantumScale * p->opacity * q->opacity;  /* Over Blend */
      gamma = 1.0 - QuantumScale * composite->opacity;
      gamma = PerceptibleReciprocal(gamma);
      composite->red = gamma * Lighten(p->red, p->opacity, q->red, q->opacity);
      composite->green = gamma * Lighten(p->green, p->opacity, q->green,
        q->opacity);
      composite->blue = gamma * Lighten(p->blue, p->opacity, q->blue,
        q->opacity);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Lighten(p->index, p->opacity, q->index,
          q->opacity);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = MagickMin(p->opacity, q->opacity);
      if ((channel & RedChannel) != 0)
        composite->red = MagickMax(p->red, q->red);
      if ((channel & GreenChannel) != 0)
        composite->green = MagickMax(p->green, q->green);
      if ((channel & BlueChannel) != 0)
        composite->blue = MagickMax(p->blue, q->blue);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = MagickMax(p->index, q->index);
    }
}

static inline void
CompositeLightenIntensity(const MagickPixelPacket * p,
  const MagickPixelPacket * q, const ChannelType channel,
  MagickPixelPacket * composite)
{
  /*
    Select the pixel based on the intensity level.  If 'Sync' flag select
    whole pixel based on alpha weighted intensity.  Otherwise use intensity
    only, but restrict copy according to channel.
  */
  if ((channel & SyncChannels) != 0)
    {
      MagickRealType
        Da,
        Sa;

      Sa = 1.0 - QuantumScale * p->opacity;
      Da = 1.0 - QuantumScale * q->opacity;
      *composite = (Sa * MagickPixelIntensity(p) >
        Da * MagickPixelIntensity(q)) ? *p : *q;
    }
  else
    {
      int
        from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));

      if ((channel & AlphaChannel) != 0)
        composite->opacity = from_p ? p->opacity : q->opacity;
      if ((channel & RedChannel) != 0)
        composite->red = from_p ? p->red : q->red;
      if ((channel & GreenChannel) != 0)
        composite->green = from_p ? p->green : q->green;
      if ((channel & BlueChannel) != 0)
        composite->blue = from_p ? p->blue : q->blue;
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = from_p ? p->index : q->index;
    }
}

#if 0
static inline MagickRealType
LinearDodge(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula

       f(Sc,Dc) = Sc + Dc
       Dca' = Sca + Dca
  */
  return (Sca + Dca);
}
#endif

static inline void
CompositeLinearDodge(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = PerceptibleReciprocal(gamma);
  composite->red = gamma * (p->red * Sa + q->red * Da);
  composite->green = gamma * (p->green * Sa + q->green * Da);
  composite->blue = gamma * (p->blue * Sa + q->blue * Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * (p->index * Sa + q->index * Da);
}

static inline MagickRealType
LinearBurn(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

       f(Sc,Dc) = Sc + Dc - 1
  */
  return (Sca + Dca - Sa * Da);
}

static inline void
CompositeLinearBurn(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = QuantumRange /
    (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * LinearBurn(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * LinearBurn(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * LinearBurn(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * LinearBurn(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

static inline MagickRealType
LinearLight(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return (Dca + 2 * Sca - 1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

       f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return ((Sca - Sa) * Da + Sca + Dca);
#endif
}

static inline void
CompositeLinearLight(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * LinearLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * LinearLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * LinearLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * LinearLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

static inline MagickRealType
Mathematics(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da,
  const GeometryInfo * geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

       f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in "compose:args" image artifact.

       A = a->rho,   B = a->sigma,  C = a->xi,  D = a->psi

    Applying the SVG transparency formula (see above), we get...

       Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

       Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
         Dca*(1.0-Sa)
  */
  return (geometry_info->rho * Sca * Dca + geometry_info->sigma * Sca * Da +
    geometry_info->xi * Dca * Sa + geometry_info->psi * Sa * Da +
    Sca * (1.0 - Da) + Dca * (1.0 - Sa));
}

static inline void
CompositeMathematics(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, const GeometryInfo * args,
  MagickPixelPacket * composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* ??? - AT */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon :
        gamma);
      composite->red = gamma * Mathematics(QuantumScale * p->red * Sa, Sa,
        QuantumScale * q->red * Da, Da, args);
      composite->green = gamma * Mathematics(QuantumScale * p->green * Sa, Sa,
        QuantumScale * q->green * Da, Da, args);
      composite->blue = gamma * Mathematics(QuantumScale * p->blue * Sa, Sa,
        QuantumScale * q->blue * Da, Da, args);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Mathematics(QuantumScale * p->index * Sa,
          Sa, QuantumScale * q->index * Da, Da, args);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange *
          (1.0 - Mathematics(Sa, 1.0, Da, 1.0, args));
      if ((channel & RedChannel) != 0)
        composite->red = QuantumRange * Mathematics(QuantumScale * p->red,
          1.0, QuantumScale * q->red, 1.0, args);
      if ((channel & GreenChannel) != 0)
        composite->green = QuantumRange * Mathematics(QuantumScale * p->green,
          1.0, QuantumScale * q->green, 1.0, args);
      if ((channel & BlueChannel) != 0)
        composite->blue = QuantumRange * Mathematics(QuantumScale * p->blue,
          1.0, QuantumScale * q->blue, 1.0, args);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = QuantumRange * Mathematics(QuantumScale * p->index,
          1.0, QuantumScale * q->index, 1.0, args);
    }
}

static inline void
CompositePlus(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  if ((channel & SyncChannels) != 0)
    {
      /*
        NOTE: "Plus" does not use 'over' alpha-blending but uses a special
        'plus' form of alpha-blending.  It is the ONLY mathematical operator
        to do this.  This is what makes it different to the otherwise
        equivalent "LinearDodge" composition method.

        Note however that color channels are still affected by the alpha
        channel as a result of the blending, making it just as useless for
        independent channel maths, just like all other mathematical
        composition methods.
        As such the removal of the 'sync' flag, is still a useful convention.

        The MagickPixelCompositePlus() function is defined in
        "composite-private.h" so it can also be used for Image Blending.
      */
      MagickPixelCompositePlus(p, p->opacity, q, q->opacity, composite);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = p->opacity + q->opacity - QuantumRange;
      if ((channel & RedChannel) != 0)
        composite->red = p->red + q->red;
      if ((channel & GreenChannel) != 0)
        composite->green = p->green + q->green;
      if ((channel & BlueChannel) != 0)
        composite->blue = p->blue + q->blue;
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = p->index + q->index;
    }
}

static inline MagickRealType
Minus(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

       f(Sc,Dc) = Sc - Dc
  */
  magick_unreferenced(Da);

  return (Sca + Dca - 2 * Dca * Sa);
}

static inline void
CompositeMinus(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = PerceptibleReciprocal(gamma);
      composite->red = gamma * Minus(p->red * Sa, Sa, q->red * Da, Da);
      composite->green = gamma * Minus(p->green * Sa, Sa, q->green * Da, Da);
      composite->blue = gamma * Minus(p->blue * Sa, Sa, q->blue * Da, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Minus(p->index * Sa, Sa, q->index * Da, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange * (1.0 - (Sa - Da));
      if ((channel & RedChannel) != 0)
        composite->red = p->red - q->red;
      if ((channel & GreenChannel) != 0)
        composite->green = p->green - q->green;
      if ((channel & BlueChannel) != 0)
        composite->blue = p->blue - q->blue;
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = p->index - q->index;
    }
}

/*
  ModulusAdd: add the two channel values, wrapping the sum back into
  quantum range, then alpha blend.
*/
static inline MagickRealType
ModulusAdd(const MagickRealType p, const MagickRealType Sa,
  const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel = p + q;
  while (pixel > QuantumRange)
    pixel -= QuantumRange;
  while (pixel < 0.0)
    pixel += QuantumRange;
  return (pixel * Sa * Da + p * Sa * (1.0 - Da) + q * Da * (1.0 - Sa));
}

static inline void
CompositeModulusAdd(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  if ((channel & SyncChannels) != 0)
    {
      double
        gamma;

      MagickRealType
        Sa,
        Da;

      Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
      Da = 1.0 - QuantumScale * q->opacity;
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = PerceptibleReciprocal(gamma);
      composite->red = ModulusAdd(p->red, Sa, q->red, Da);
      composite->green = ModulusAdd(p->green, Sa, q->green, Da);
      composite->blue = ModulusAdd(p->blue, Sa, q->blue, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = ModulusAdd(p->index, Sa, q->index, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange -
          ModulusAdd(QuantumRange - p->opacity, 1.0,
          QuantumRange - q->opacity, 1.0);
      if ((channel & RedChannel) != 0)
        composite->red = ModulusAdd(p->red, 1.0, q->red, 1.0);
      if ((channel & GreenChannel) != 0)
        composite->green = ModulusAdd(p->green, 1.0, q->green, 1.0);
      if ((channel & BlueChannel) != 0)
        composite->blue = ModulusAdd(p->blue, 1.0, q->blue, 1.0);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = ModulusAdd(p->index, 1.0, q->index, 1.0);
    }
}

/*
  ModulusSubtract: subtract the destination from the source, wrapping the
  result back into quantum range, then alpha blend.
*/
static inline MagickRealType
ModulusSubtract(const MagickRealType p, const MagickRealType Sa,
  const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel = p - q;
  while (pixel > QuantumRange)
    pixel -= QuantumRange;
  while (pixel < 0.0)
    pixel += QuantumRange;
  return (pixel * Sa * Da + p * Sa * (1.0 - Da) + q * Da * (1.0 - Sa));
}

static inline void
CompositeModulusSubtract(const MagickPixelPacket * p,
  const MagickPixelPacket * q, const ChannelType channel,
  MagickPixelPacket * composite)
{
  if ((channel & SyncChannels) != 0)
    {
      double
        gamma;

      MagickRealType
        Da,
        Sa;

      Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
      Da = 1.0 - QuantumScale * q->opacity;
      gamma = RoundToUnity(Sa + Da - Sa * Da);
      composite->opacity = (MagickRealType) QuantumRange * (1.0 - gamma);
      gamma = PerceptibleReciprocal(gamma);
      composite->red = ModulusSubtract(p->red, Sa, q->red, Da);
      composite->green = ModulusSubtract(p->green, Sa, q->green, Da);
      composite->blue = ModulusSubtract(p->blue, Sa, q->blue, Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = ModulusSubtract(p->index, Sa, q->index, Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange -
          ModulusSubtract(QuantumRange - p->opacity, 1.0,
          QuantumRange - q->opacity, 1.0);
      if ((channel & RedChannel) != 0)
        composite->red = ModulusSubtract(p->red, 1.0, q->red, 1.0);
      if ((channel & GreenChannel) != 0)
        composite->green = ModulusSubtract(p->green, 1.0, q->green, 1.0);
      if ((channel & BlueChannel) != 0)
        composite->blue = ModulusSubtract(p->blue, 1.0, q->blue, 1.0);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = ModulusSubtract(p->index, 1.0, q->index, 1.0);
    }
}

static inline MagickRealType
Multiply(const MagickRealType Sca, const MagickRealType Sa, const
MagickRealType Dca, const MagickRealType Da) { return (Sca * Dca + Sca * (1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositeMultiply(const MagickPixelPacket * p, const MagickPixelPacket * q, const ChannelType channel, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; if ((channel & SyncChannels) != 0) { gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per * SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red = gamma * Multiply(QuantumScale * p->red * Sa, Sa, QuantumScale * q->red * Da, Da); composite->green = gamma * Multiply(QuantumScale * p->green * Sa, Sa, QuantumScale * q->green * Da, Da); composite->blue = gamma * Multiply(QuantumScale * p->blue * Sa, Sa, QuantumScale * q->blue * Da, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Multiply(QuantumScale * p->index * Sa, Sa, QuantumScale * q->index * Da, Da); } else { /* handle channels as separate grayscale * channels */ if ((channel & AlphaChannel) != 0) composite->opacity = QuantumRange * (1.0 - Sa * Da); if ((channel & RedChannel) != 0) composite->red = QuantumScale * p->red * q->red; if ((channel & GreenChannel) != 0) composite->green = QuantumScale * p->green * q->green; if ((channel & BlueChannel) != 0) composite->blue = QuantumScale * p->blue * q->blue; if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = QuantumScale * p->index * q->index; } } static inline MagickRealType Out(const MagickRealType p, const MagickRealType Sa, const MagickRealType magick_unused(q), const MagickRealType Da) { magick_unreferenced(q); return (Sa * p * (1.0 - Da)); } static inline void CompositeOut(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { double gamma; 
MagickRealType Da, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = Sa * (1.0 - Da); composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); composite->red = gamma * Out(p->red, Sa, q->red, Da); composite->green = gamma * Out(p->green, Sa, q->green, Da); composite->blue = gamma * Out(p->blue, Sa, q->blue, Da); if (q->colorspace == CMYKColorspace) composite->index = gamma * Out(p->index, Sa, q->index, Da); } static MagickRealType PegtopLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { /* * PegTop: A Soft-Light alternative: A continuous version of the * Softlight function, producing very similar results. * * f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc * * See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs(Da) < MagickEpsilon) return (Sca); return (Dca * Dca * (Sa - 2.0 * Sca) / Da + Sca * (2.0 * Dca + 1.0 - Da) + Dca * (1.0 - Sa)); } static inline void CompositePegtopLight(const MagickPixelPacket * p, const MagickPixelPacket * q, MagickPixelPacket * composite) { MagickRealType Da, gamma, Sa; Sa = 1.0 - QuantumScale * p->opacity; /* simplify and speed up * equations */ Da = 1.0 - QuantumScale * q->opacity; gamma = RoundToUnity(Sa + Da - Sa * Da); /* over blend, as per SVG doc */ composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma); gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma);  /* completes the gamma clamp begun on the prior line */
  composite->red = gamma * PegtopLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * PegtopLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * PegtopLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * PegtopLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

static MagickRealType
PinLight(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
  */
  if (Dca * Sa < Da * (2 * Sca - Sa))
    return (Sca * (Da + 1.0) - Sa * Da + Dca * (1.0 - Sa));
  if ((Dca * Sa) > (2 * Sca * Da))
    return (Sca * Da + Sca + Dca * (1.0 - Sa));
  return (Sca * (1.0 - Da) + Dca);
}

/* Head of CompositePinLight(): pin-light blend of p over q; the '?:'
   clamp of gamma completes on the next chunk line. */
static inline void
CompositePinLight(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType Da, gamma, Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);  /* completes the gamma clamp begun on the prior line */
  composite->red = gamma * PinLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * PinLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * PinLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * PinLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

static inline MagickRealType
Screen(const MagickRealType Sca, const MagickRealType Dca)
{
  /*
    Screen: A negated multiply f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return (Sca + Dca - Sca * Dca);
}

/* Head of CompositeScreen(): 'screen' blend of p over q, synchronized or
   per-channel; the '?:' clamp of gamma completes on the next chunk line. */
static inline void
CompositeScreen(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const ChannelType channel, MagickPixelPacket * composite)
{
  double gamma;

  MagickRealType Da, Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  if ((channel & SyncChannels) != 0)
    {
      gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
      composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma);
      Sa *= (MagickRealType) QuantumScale;
      Da *= (MagickRealType) QuantumScale;  /* optimization: pre-scale alphas */
      gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);  /* completes the gamma clamp begun on the prior line */
      composite->red = gamma * Screen(p->red * Sa, q->red * Da);
      composite->green = gamma * Screen(p->green * Sa, q->green * Da);
      composite->blue = gamma * Screen(p->blue * Sa, q->blue * Da);
      if (q->colorspace == CMYKColorspace)
        composite->index = gamma * Screen(p->index * Sa, q->index * Da);
    }
  else
    {
      /* handle channels as separate grayscale channels */
      if ((channel & AlphaChannel) != 0)
        composite->opacity = QuantumRange * (1.0 - Screen(Sa, Da));
      if ((channel & RedChannel) != 0)
        composite->red = QuantumRange * Screen(QuantumScale * p->red,
          QuantumScale * q->red);
      if ((channel & GreenChannel) != 0)
        composite->green = QuantumRange * Screen(QuantumScale * p->green,
          QuantumScale * q->green);
      if ((channel & BlueChannel) != 0)
        composite->blue = QuantumRange * Screen(QuantumScale * p->blue,
          QuantumScale * q->blue);
      if ((channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = QuantumRange * Screen(QuantumScale * p->index,
          QuantumScale * q->index);
    }
}

/* Head of SoftLight(): SVG 'soft-light' blend term.  The disabled branch
   keeps the superseded Oct 2004 formula for reference; the live branch
   (continuing on the next chunk line) implements the March 2009 revision. */
static MagickRealType
SoftLight(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification -- was found to be incorrect.  See
    http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
  */
  if (2.0 * Sca < Sa)
    return (Dca * (Sa - (1.0 - Dca / Da) * (2.0 * Sca - Sa)) + Sca * (1.0 - Da) +
      Dca * (1.0 - Sa));
  if (8.0 * Dca <= Da)
    return (Dca * (Sa - (1.0 - Dca / Da) * (2.0 * Sca - Sa) * (3.0 - 8.0 * Dca /
      Da)) + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  return ((Dca * Sa + (pow(Dca / Da, 0.5) * Da - Dca) * (2.0 * Sca - Sa)) +
    Sca * (1.0 - Da) + Dca * (1.0 - Sa));
#else
  MagickRealType alpha, beta;

  /*
    New specification: March 2009 SVG specification.
*/
  /* NOTE(review): alpha = Dca/Da divides without a Da==0 guard here;
     presumably callers never pass a fully transparent canvas -- confirm. */
  alpha = Dca / Da;
  if ((2.0 * Sca) < Sa)
    return (Dca * (Sa + (2.0 * Sca - Sa) * (1.0 - alpha)) + Sca * (1.0 - Da) +
      Dca * (1.0 - Sa));
  if (((2.0 * Sca) > Sa) && ((4.0 * Dca) <= Da))
    {
      beta = Dca * Sa + Da * (2.0 * Sca - Sa) * (4.0 * alpha * (4.0 * alpha +
        1.0) * (alpha - 1.0) + 7.0 * alpha) + Sca * (1.0 - Da) +
        Dca * (1.0 - Sa);
      return (beta);
    }
  beta = Dca * Sa + Da * (2.0 * Sca - Sa) * (pow(alpha, 0.5) - alpha) +
    Sca * (1.0 - Da) + Dca * (1.0 - Sa);
  return (beta);
#endif
}

/*
  CompositeSoftLight(): SVG 'soft-light' blend of p over q with
  synchronized channels.
*/
static inline void
CompositeSoftLight(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType Da, gamma, Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * SoftLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * SoftLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * SoftLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * SoftLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

/*
  Deprecated.  Multiply difference by amount, if difference larger than
  threshold???
What use this is is completely unknown.  The Opacity
  calculation appears to be inverted -- Anthony Thyssen
*/
static inline MagickRealType
Threshold(const MagickRealType p, const MagickRealType q,
  const MagickRealType threshold, const MagickRealType amount)
{
  MagickRealType delta;

  delta = p - q;
  /* below threshold: keep the canvas value untouched */
  if ((MagickRealType) fabs((double)(2.0 * delta)) < threshold)
    return (q);
  return (q + delta * amount);
}

/* Apply the deprecated Threshold() term per channel; note opacity is
   computed inverted (QuantumRange - ...), see the note above Threshold(). */
static inline void
CompositeThreshold(const MagickPixelPacket * p, const MagickPixelPacket * q,
  const MagickRealType threshold, const MagickRealType amount,
  MagickPixelPacket * composite)
{
  composite->red = Threshold(p->red, q->red, threshold, amount);
  composite->green = Threshold(p->green, q->green, threshold, amount);
  composite->blue = Threshold(p->blue, q->blue, threshold, amount);
  composite->opacity = QuantumRange - Threshold(p->opacity, q->opacity,
    threshold, amount);
  if (q->colorspace == CMYKColorspace)
    composite->index = Threshold(p->index, q->index, threshold, amount);
}

/* Head of VividLight(); the formula comment completes on the next chunk
   line. */
static MagickRealType
VividLight(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ?
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  /* guard the two divisions below against Sa==0 and Sa==Sca */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca - Sa) < MagickEpsilon))
    return (Sa * Da + Sca * (1.0 - Da) + Dca * (1.0 - Sa));
  if ((2 * Sca) <= Sa)
    return (Sa * (Da + Sa * (Dca - Da) / (2.0 * Sca)) + Sca * (1.0 - Da) +
      Dca * (1.0 - Sa));
  return (Dca * Sa * Sa / (2.0 * (Sa - Sca)) + Sca * (1.0 - Da) +
    Dca * (1.0 - Sa));
}

/*
  CompositeVividLight(): 'vivid-light' blend of p over q with synchronized
  channels.
*/
static inline void
CompositeVividLight(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType Da, gamma, Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = RoundToUnity(Sa + Da - Sa * Da);  /* over blend, as per SVG doc */
  composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma);
  gamma = QuantumRange / (fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red = gamma * VividLight(QuantumScale * p->red * Sa, Sa,
    QuantumScale * q->red * Da, Da);
  composite->green = gamma * VividLight(QuantumScale * p->green * Sa, Sa,
    QuantumScale * q->green * Da, Da);
  composite->blue = gamma * VividLight(QuantumScale * p->blue * Sa, Sa,
    QuantumScale * q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * VividLight(QuantumScale * p->index * Sa, Sa,
      QuantumScale * q->index * Da, Da);
}

/* Duff-Porter 'xor' term: f = Sca*(1-Da) + Dca*(1-Sa). */
static MagickRealType
Xor(const MagickRealType Sca, const MagickRealType Sa,
  const MagickRealType Dca, const MagickRealType Da)
{
  return (Sca * (1.0 - Da) + Dca * (1.0 - Sa));
}

/* Head of CompositeXor(): Duff-Porter Xor composition of p against q
   (green/blue/index channels continue on the next chunk line). */
static inline void
CompositeXor(const MagickPixelPacket * p, const MagickPixelPacket * q,
  MagickPixelPacket * composite)
{
  MagickRealType Da, gamma, Sa;

  Sa = 1.0 - QuantumScale * p->opacity;  /* simplify and speed up equations */
  Da = 1.0 - QuantumScale * q->opacity;
  gamma = Sa + Da - 2 * Sa * Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity = (MagickRealType) QuantumRange *(1.0 - gamma);
  gamma = PerceptibleReciprocal(gamma);
  composite->red = gamma * Xor(p->red * Sa, Sa, q->red * Da, Da);
/* Body of CompositeXor() continued from the previous chunk line. */
  composite->green = gamma * Xor(p->green * Sa, Sa, q->green * Da, Da);
  composite->blue = gamma * Xor(p->blue * Sa, Sa, q->blue * Da, Da);
  if (q->colorspace == CMYKColorspace)
    composite->index = gamma * Xor(p->index * Sa, Sa, q->index * Da, Da);
}

/*
  CompositeImage(): convenience wrapper that composites source_image over
  image at (x_offset,y_offset) on all default channels by delegating to
  CompositeImageChannel().  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType
CompositeImage(Image * image, const CompositeOperator compose,
  const Image * source_image, const ssize_t x_offset, const ssize_t y_offset)
{
  MagickBooleanType status;

  status = CompositeImageChannel(image, DefaultChannels, compose, source_image,
    x_offset, y_offset);
  return (status);
}

/* Head of CompositeImageChannel(): the full compositing work-horse; its
   body continues well past this chunk. */
MagickExport MagickBooleanType
CompositeImageChannel(Image * image, const ChannelType channel,
  const CompositeOperator compose, const Image * composite,
  const ssize_t x_offset, const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView * source_view, *image_view;

  const char *value;

  ExceptionInfo * exception;

  GeometryInfo geometry_info;

  Image * canvas_image, *source_image;

  MagickBooleanType clamp, clip_to_self, status;

  MagickOffsetType progress;

  MagickPixelPacket zero;

  MagickRealType amount, canvas_dissolve, midpoint, percent_luma,
    percent_chroma, source_dissolve, threshold;

  MagickStatusType flags;

  ssize_t y;

  /*
    Prepare composite image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); exception = (&image->exception); source_image = CloneImage(composite, 0, 0, MagickTrue, exception); if (source_image == (const Image *)NULL) return (MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void)SetImageColorspace(image, sRGBColorspace); (void)SetImageColorspace(source_image, image->colorspace); GetMagickPixelPacket(image, &zero); canvas_image = (Image *) NULL; amount = 0.5; canvas_dissolve = 1.0; clip_to_self = MagickTrue; percent_luma = 100.0; percent_chroma = 100.0; source_dissolve = 1.0; threshold = 0.05 f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* * Modify canvas outside the overlaid region. 
*/ clip_to_self = MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset + (ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset + (ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status = MagickTrue; source_view = AcquireVirtualCacheView(source_image, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket * source_indexes; register const PixelPacket * p; register IndexPacket * indexes; register PixelPacket * q; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); q = GetCacheViewAuthenticPixels(image_view, x_offset, y + y_offset, source_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (q == (PixelPacket *) NULL)) { status = MagickFalse; continue; } source_indexes = GetCacheViewVirtualIndexQueue(source_view); indexes = GetCacheViewAuthenticIndexQueue(image_view); (void)CopyMagickMemory(q, p, source_image->columns * sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *)NULL)) (void)CopyMagickMemory(indexes, source_indexes, source_image->columns * sizeof(*indexes)); sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed = SetImageProgress(image, CompositeImageTag, (MagickOffsetType) y, image->rows); if (proceed 
== MagickFalse) status = MagickFalse; } } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); source_image = DestroyImage(source_image); return (status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* * Modify canvas outside the overlaid region and require an alpha * channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); clip_to_self = MagickFalse; break; } case BlurCompositeOp: { CacheView * canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter * resample_filter; SegmentInfo blur; /* * Blur Image by resampling. * * Blur Image dictated by an overlay gradient map: X = red_channel; * Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image = DestroyImage(source_image); return (MagickFalse); } /* * Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags = NoValue; value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) flags = ParseGeometry(value, &geometry_info); if ((flags & WidthValue) == 0) { (void)ThrowMagickException(exception, GetMagickModule(), OptionWarning, "InvalidGeometry", "'%s' '%s'", "compose:args", value); source_image = DestroyImage(source_image); canvas_image = DestroyImage(canvas_image); return (MagickFalse); } /* * Users input sigma now needs to be converted to the EWA ellipse * size. The filter defaults to a sigma of 0.5 so to make this * match the users input the ellipse size needs to be doubled. 
*/ width = height = geometry_info.rho * 2.0; if ((flags & HeightValue) != 0) height = geometry_info.sigma * 2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1 = width; blur.x2 = 0.0; blur.y1 = 0.0; blur.y2 = height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0) { MagickRealType angle; angle = DegreesToRadians(geometry_info.xi); blur.x1 = width * cos(angle); blur.x2 = width * sin(angle); blur.y1 = (-height * sin(angle)); blur.y2 = height * cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start = 0.0; angle_range = 0.0; if ((flags & YValue) != 0) { angle_start = DegreesToRadians(geometry_info.xi); angle_range = DegreesToRadians(geometry_info.psi) - angle_start; } /* * Set up a gaussian cylindrical filter for EWA Bluring. * * As the minimum ellipse radius of support*1.0 the EWA algorithm * can only produce a minimum blur of 0.5 for Gaussian * (support=2.0) This means that even 'No Blur' will be still a * little blurry! * * The solution (as well as the problem of preventing any user * expert filter settings, is to set our own user settings, then * restore them afterwards. 
*/ resample_filter = AcquireResampleFilter(image, exception); SetResampleFilter(resample_filter, GaussianFilter, 1.0); /* do the variable blurring of each pixel in image */ pixel = zero; source_view = AcquireVirtualCacheView(source_image, exception); canvas_view = AcquireAuthenticCacheView(canvas_image, exception); for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket * magick_restrict p; register PixelPacket * magick_restrict r; register IndexPacket * magick_restrict canvas_indexes; register ssize_t x; if (((y + y_offset) < 0) || ((y + y_offset) >= (ssize_t) image->rows)) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); r = QueueCacheViewAuthenticPixels(canvas_view, 0, y, canvas_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes = GetCacheViewAuthenticIndexQueue(canvas_view); for (x = 0; x < (ssize_t) source_image->columns; x++) { if (((x_offset + x) < 0) || ((x_offset + x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle = angle_start + angle_range * QuantumScale * GetPixelBlue(p); blur.x1 = width * cos(angle); blur.x2 = width * sin(angle); blur.y1 = (-height * sin(angle)); blur.y2 = height * cos(angle); } #if 0 if (x == 10 && y == 60) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale * GetPixelRed(p), QuantumScale * GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1 * QuantumScale * GetPixelRed(p), blur.y1 * QuantumScale * GetPixelGreen(p), blur.x2 * QuantumScale * GetPixelRed(p), blur.y2 * QuantumScale * GetPixelGreen(p)); (void)ResamplePixelColor(resample_filter, (double)x_offset + x, (double) y_offset + y, &pixel); SetPixelPacket(canvas_image, &pixel, r, canvas_indexes + x); p++; r++; } sync = 
SyncCacheViewAuthenticPixels(canvas_view, exception); if (sync == MagickFalse) break; } resample_filter = DestroyResampleFilter(resample_filter); source_view = DestroyCacheView(source_view); canvas_view = DestroyCacheView(canvas_view); source_image = DestroyImage(source_image); source_image = canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView * canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket * magick_restrict canvas_indexes; register PixelPacket * magick_restrict r; /* * Displace/Distort based on overlay gradient map: X = * red_channel; Y = green_channel; compose:args = * x_scale[,y_scale[,center.x,center.y]] */ canvas_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image = DestroyImage(source_image); return (MagickFalse); } SetGeometryInfo(&geometry_info); flags = NoValue; value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) flags = ParseGeometry(value, &geometry_info); if ((flags & (WidthValue | HeightValue)) == 0) { if ((flags & AspectValue) == 0) { horizontal_scale = (MagickRealType) (source_image->columns - 1) / 2.0; vertical_scale = (MagickRealType) (source_image->rows - 1) / 2.0; } else { horizontal_scale = (MagickRealType) (image->columns - 1) / 2.0; vertical_scale = (MagickRealType) (image->rows - 1) / 2.0; } } else { horizontal_scale = geometry_info.rho; vertical_scale = geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale *= (source_image->columns - 1) / 200.0; vertical_scale *= (source_image->rows - 1) / 200.0; } else { horizontal_scale *= (image->columns - 1) / 200.0; vertical_scale *= (image->rows - 1) / 200.0; } } if ((flags & HeightValue) == 0) vertical_scale = horizontal_scale; } /* * Determine fixed center point for absolute distortion map * Absolute 
distort == Displace offset relative to a fixed * absolute point Select that point according to +X+Y user * inputs. default = center of overlay image arg flag '!' = * locations/percentage relative to background image */ center.x = (MagickRealType) x_offset; center.y = (MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x = ((MagickRealType) image->columns - 1) / 2.0; else center.x = (MagickRealType) (x_offset + (source_image->columns - 1) / 2.0); else if ((flags & AspectValue) == 0) center.x = (MagickRealType) (x_offset + geometry_info.xi); else center.x = geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y = ((MagickRealType) image->rows - 1) / 2.0; else center.y = (MagickRealType) (y_offset + (source_image->rows - 1) / 2.0); else if ((flags & AspectValue) != 0) center.y = geometry_info.psi; else center.y = (MagickRealType) (y_offset + geometry_info.psi); } /* * Shift the pixel offset point as defined by the provided, * displacement/distortion map. -- Like a lens... 
*/ pixel = zero; image_view = AcquireVirtualCacheView(image, exception); source_view = AcquireVirtualCacheView(source_image, exception); canvas_view = AcquireAuthenticCacheView(canvas_image, exception); for (y = 0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket * magick_restrict p; register ssize_t x; if (((y + y_offset) < 0) || ((y + y_offset) >= (ssize_t) image->rows)) continue; p = GetCacheViewVirtualPixels(source_view, 0, y, source_image->columns, 1, exception); r = QueueCacheViewAuthenticPixels(canvas_view, 0, y, canvas_image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes = GetCacheViewAuthenticIndexQueue(canvas_view); for (x = 0; x < (ssize_t) source_image->columns; x++) { if (((x_offset + x) < 0) || ((x_offset + x) >= (ssize_t) image->columns)) { p++; continue; } /* * Displace the offset. */ offset.x = (double)((horizontal_scale * (GetPixelRed(p) - (((MagickRealType) QuantumRange + 1.0) / 2.0))) / (((MagickRealType) QuantumRange + 1.0) / 2.0) + center.x + ((compose == DisplaceCompositeOp) ? x : 0)); offset.y = (double)((vertical_scale * (GetPixelGreen(p) - (((MagickRealType) QuantumRange + 1.0) / 2.0))) / (((MagickRealType) QuantumRange + 1.0) / 2.0) + center.y + ((compose == DisplaceCompositeOp) ? y : 0)); (void)InterpolateMagickPixelPacket(image, image_view, UndefinedInterpolatePixel, (double)offset.x, (double)offset.y, &pixel, exception); /* * Mask with the 'invalid pixel mask' in alpha channel. 
*/ pixel.opacity = (MagickRealType) QuantumRange *(1.0 - (1.0 - QuantumScale * pixel.opacity) * (1.0 - QuantumScale * GetPixelOpacity(p))); SetPixelPacket(canvas_image, &pixel, r, canvas_indexes + x); p++; r++; } sync = SyncCacheViewAuthenticPixels(canvas_view, exception); if (sync == MagickFalse) break; } canvas_view = DestroyCacheView(canvas_view); source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); source_image = DestroyImage(source_image); source_image = canvas_image; break; } case DissolveCompositeOp: { /* * Geometry arguments to dissolve factors. */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); source_dissolve = geometry_info.rho / 100.0; canvas_dissolve = 1.0; if ((source_dissolve - MagickEpsilon) < 0.0) source_dissolve = 0.0; if ((source_dissolve + MagickEpsilon) > 1.0) { canvas_dissolve = 2.0 - source_dissolve; source_dissolve = 1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve = geometry_info.sigma / 100.0; if ((canvas_dissolve - MagickEpsilon) < 0.0) canvas_dissolve = 0.0; clip_to_self = MagickFalse; if ((canvas_dissolve + MagickEpsilon) > 1.0) { canvas_dissolve = 1.0; clip_to_self = MagickTrue; } } break; } case BlendCompositeOp: { value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); source_dissolve = geometry_info.rho / 100.0; canvas_dissolve = 1.0 - source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve = geometry_info.sigma / 100.0; clip_to_self = MagickFalse; if ((canvas_dissolve + MagickEpsilon) > 1.0) clip_to_self = MagickTrue; } break; } case MathematicsCompositeOp: { /* * Just collect the values from "compose:args", setting. Unused * values are set to zero automagically. 
* * Arguments are normally a comma separated list, so this probably * should be changed to some 'general comma list' parser, (with a * minimum number of values) */ SetGeometryInfo(&geometry_info); value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) (void)ParseGeometry(value, &geometry_info); break; } case ModulateCompositeOp: { /* * Determine the luma and chroma scale. */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); percent_luma = geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma = geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* * Determine the amount and threshold. This Composition method is * deprecated */ value = GetImageArtifact(image, "compose:args"); if (value != (char *)NULL) { flags = ParseGeometry(value, &geometry_info); amount = geometry_info.rho; threshold = geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold = 0.05 f; } threshold *= QuantumRange; break; } default: break; } value = GetImageArtifact(image, "compose:outside-overlay"); if (value != (const char *)NULL) clip_to_self = IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp = MagickTrue; value = GetImageArtifact(image, "compose:clamp"); if (value != (const char *)NULL) clamp = IsMagickTrue(value); /* * Composite image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status = AccelerateCompositeImage(image, channel, compose, source_image, x_offset, y_offset, canvas_dissolve, source_dissolve, exception); if (status != MagickFalse) return (status); #endif status = MagickTrue; progress = 0; midpoint = ((MagickRealType) QuantumRange + 1.0) / 2; GetMagickPixelPacket(source_image, &zero); source_view = AcquireVirtualCacheView(source_image, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { const PixelPacket * pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket * magick_restrict source_indexes; register const PixelPacket * magick_restrict p; register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y - y_offset) >= (ssize_t) source_image->rows) continue; } /* * If pixels is NULL, y is outside overlay region. 
*/ pixels = (PixelPacket *) NULL; p = (PixelPacket *) NULL; if ((y >= y_offset) && ((y - y_offset) < (ssize_t) source_image->rows)) { p = GetCacheViewVirtualPixels(source_view, 0, y - y_offset, source_image->columns, 1, exception); if (p == (const PixelPacket *)NULL) { status = MagickFalse; continue; } pixels = p; if (x_offset < 0) p -= x_offset; } q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); source_indexes = GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image, &source); GetMagickPixelPacket(image, &canvas); hue = 0.0; chroma = 0.0; luma = 0.0; for (x = 0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x - x_offset) >= (ssize_t) source_image->columns) break; } canvas.red = (MagickRealType) GetPixelRed(q); canvas.green = (MagickRealType) GetPixelGreen(q); canvas.blue = (MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity = (MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index = (MagickRealType) GetPixelIndex(indexes + x); if (image->colorspace == CMYKColorspace) { canvas.red = (MagickRealType) QuantumRange - canvas.red; canvas.green = (MagickRealType) QuantumRange - canvas.green; canvas.blue = (MagickRealType) QuantumRange - canvas.blue; canvas.index = (MagickRealType) QuantumRange - canvas.index; } /* * Handle canvas modifications outside overlaid region. 
*/ composite = canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x - x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity = (MagickRealType) (QuantumRange - canvas_dissolve * (QuantumRange - composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas, &composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity = (MagickRealType) TransparentOpacity; break; } default: { (void)GetOneVirtualMagickPixel(source_image, x - x_offset, y - y_offset, &composite, exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red = (MagickRealType) QuantumRange - composite.red; composite.green = (MagickRealType) QuantumRange - composite.green; composite.blue = (MagickRealType) QuantumRange - composite.blue; composite.index = (MagickRealType) QuantumRange - composite.index; } SetPixelRed(q, clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q, clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q, clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q, clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes + x, clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* * Handle normal overlay of source onto canvas. 
*/ source.red = (MagickRealType) GetPixelRed(p); source.green = (MagickRealType) GetPixelGreen(p); source.blue = (MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity = (MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index = (MagickRealType) GetPixelIndex(source_indexes + x - x_offset); if (source_image->colorspace == CMYKColorspace) { source.red = (MagickRealType) QuantumRange - source.red; source.green = (MagickRealType) QuantumRange - source.green; source.blue = (MagickRealType) QuantumRange - source.blue; source.index = (MagickRealType) QuantumRange - source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas, &composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite = source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source, source.opacity, &canvas, canvas.opacity, &composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas, canvas.opacity, &source, source.opacity, &composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source, &canvas, &composite); break; } case DstInCompositeOp: { CompositeIn(&canvas, &source, &composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source, &canvas, &composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas, &source, &composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source, &canvas, &composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas, &source, &composite); break; } case XorCompositeOp: { CompositeXor(&source, &canvas, &composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source, &canvas, channel, &composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source, &canvas, channel, 
&composite); break; } case MinusSrcCompositeOp: { CompositeMinus(&canvas, &source, channel, &composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source, &canvas, channel, &composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source, &canvas, channel, &composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source, &canvas, channel, &composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source, &canvas, channel, &composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source, &canvas, channel, &composite); break; } case ScreenCompositeOp: { CompositeScreen(&source, &canvas, channel, &composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source, &canvas, channel, &composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas, &source, channel, &composite); break; } case DarkenCompositeOp: { CompositeDarken(&source, &canvas, channel, &composite); break; } case LightenCompositeOp: { CompositeLighten(&source, &canvas, channel, &composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source, &canvas, channel, &composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source, &canvas, channel, &composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source, &canvas, channel, &geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source, &canvas, &composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source, &canvas, &composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source, &canvas, &composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source, &canvas, &composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source, &canvas, &composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source, &canvas, &composite); break; } case OverlayCompositeOp: { 
/* Overlay = Reversed HardLight. */ CompositeHardLight(&canvas, &source, &composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source, &canvas, &composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source, &canvas, &composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source, &canvas, &composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source, &canvas, &composite); break; } case PinLightCompositeOp: { CompositePinLight(&source, &canvas, &composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange / 2.0)) || (IsMagickColorSimilar(&source, &canvas) != MagickFalse)) composite.opacity = (MagickRealType) TransparentOpacity; else composite.opacity = (MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source, &canvas, &composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source, (MagickRealType) (QuantumRange - source_dissolve * (QuantumRange - source.opacity)), &canvas, (MagickRealType) (QuantumRange - canvas_dissolve * (QuantumRange - canvas.opacity)), &composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source, source_dissolve, &canvas, canvas_dissolve, &composite); break; } case StereoCompositeOp: { canvas.red = (MagickRealType) GetPixelRed(p); break; } case ThresholdCompositeOp: { CompositeThreshold(&source, &canvas, threshold, amount, &composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset = (ssize_t) (MagickPixelIntensityToQuantum(&source) - midpoint); if (offset == 0) break; CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); luma += (0.01 * percent_luma * offset) / midpoint; chroma *= 0.01 * percent_chroma; HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); break; } case 
HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &hue, &sans, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &sans, &chroma, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &hue, &chroma, &luma); CompositeHCL(source.red, source.green, source.blue, &sans, &sans, &luma); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite = source; break; } CompositeHCL(canvas.red, canvas.green, canvas.blue, &sans, &sans, &luma); CompositeHCL(source.red, source.green, source.blue, &hue, &chroma, &sans); HCLComposite(hue, chroma, luma, &composite.red, &composite.green, &composite.blue); if (source.opacity < canvas.opacity) composite.opacity = source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red = source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { 
composite.green = source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue = source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity = (MagickRealType) (QuantumRange - MagickPixelIntensityToQuantum(&source)); else composite.opacity = source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index = QuantumRange - source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite = source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red = (MagickRealType) QuantumRange - composite.red; composite.green = (MagickRealType) QuantumRange - composite.green; composite.blue = (MagickRealType) QuantumRange - composite.blue; composite.index = (MagickRealType) QuantumRange - composite.index; } SetPixelRed(q, clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q, clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q, clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q, clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes + x, clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels + source_image->columns)) p = pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed = SetImageProgress(image, CompositeImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); if (canvas_image != (Image *) NULL) canvas_image = DestroyImage(canvas_image); else source_image = DestroyImage(source_image); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T e x t u r e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TextureImage() repeatedly tiles the texture image across and * down the image % canvas. % % The format of the TextureImage method is: % * % MagickBooleanType TextureImage(Image *image,const Image *texture) % * % A description of each parameter follows: % % o image: the image. % % * o texture: This image is the texture to layer on the background. 
% */ MagickExport MagickBooleanType TextureImage(Image * image, const Image * texture) { #define TextureImageTag "Texture/Image" CacheView * image_view, *texture_view; ExceptionInfo * exception; Image * texture_image; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); if (texture == (const Image *)NULL) return (MagickFalse); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); exception = (&image->exception); texture_image = CloneImage(texture, 0, 0, MagickTrue, exception); if (texture_image == (const Image *)NULL) return (MagickFalse); (void)TransformImageColorspace(texture_image, image->colorspace); (void)SetImageVirtualPixelMethod(texture_image, TileVirtualPixelMethod); status = MagickTrue; if ((image->compose != CopyCompositeOp) && ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) || (texture_image->matte != MagickFalse))) { /* * Tile texture onto the image background. 
*/ for (y = 0; y < (ssize_t) image->rows; y += (ssize_t) texture_image->rows) { register ssize_t x; if (status == MagickFalse) continue; for (x = 0; x < (ssize_t) image->columns; x += (ssize_t) texture_image->columns) { MagickBooleanType thread_status; thread_status = CompositeImage(image, image->compose, texture_image, x + texture_image->tile_offset.x, y + texture_image->tile_offset.y); if (thread_status == MagickFalse) { status = thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, TextureImageTag, (MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, TextureImageTag, (MagickOffsetType) image->rows, image->rows); texture_image = DestroyImage(texture_image); return (status); } /* * Tile texture onto the image background (optimized). */ status = MagickTrue; texture_view = AcquireVirtualCacheView(texture_image, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,texture_image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket * texture_indexes; register const PixelPacket * p; register IndexPacket * indexes; register ssize_t x; register PixelPacket * q; size_t width; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(texture_view, texture_image->tile_offset.x, (y + texture_image->tile_offset.y) % texture_image->rows, texture_image->columns, 1, exception); q = QueueCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if ((p == (const PixelPacket *)NULL) || (q == (PixelPacket *) NULL)) { status = MagickFalse; continue; } texture_indexes = GetCacheViewVirtualIndexQueue(texture_view); indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) 
image->columns; x += (ssize_t) texture_image->columns) { width = texture_image->columns; if ((x + (ssize_t) width) > (ssize_t) image->columns) width = image->columns - x; (void)CopyMagickMemory(q, p, width * sizeof(*p)); if ((image->colorspace == CMYKColorspace) && (texture_image->colorspace == CMYKColorspace)) { (void)CopyMagickMemory(indexes, texture_indexes, width * sizeof(*indexes)); indexes += width; } q += width; } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TextureImage) #endif proceed = SetImageProgress(image, TextureImageTag, (MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } texture_view = DestroyCacheView(texture_view); image_view = DestroyCacheView(image_view); texture_image = DestroyImage(texture_image); return (status); }
GB_unaryop__lnot_uint32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint32_int8 // op(A') function: GB_tran__lnot_uint32_int8 // C type: uint32_t // A type: int8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint32_int8 ( uint32_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { 
GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint32_int8 // op(A') function: GB_tran__lnot_uint32_int8 // C type: uint32_t // A type: int8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint32_int8 ( uint32_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint32_int8 // op(A') function: GB_tran__lnot_uint32_int8 // C type: uint32_t // A type: int8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint32_int8 ( uint32_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { 
GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
8850.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute parallel for private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj)) { //printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; #pragma scop for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i - 1][j - 1] + 0.5 * A[i - 1][j] + -0.8 * A[i - 1][j + 1] + -0.3 * A[i][j - 1] + 0.6 * A[i][j] + -0.9 * A[i][j + 1] + 0.4 * A[i + 1][j - 1] + 0.7 * A[i + 1][j] + 0.1 * A[i + 1][j + 1]; } } #pragma endscop //printf("Kernal computation complete !!\n"); } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array(ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. 
*/ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj)) { //printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; #pragma scop #pragma omp target teams distribute parallel for private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i - 1][j - 1] + 0.5 * A[i - 1][j] + -0.8 * A[i - 1][j + 1] + -0.3 * A[i][j - 1] + 0.6 * A[i][j] + -0.9 * A[i][j + 1] + 0.4 * A[i + 1][j - 1] + 0.7 * A[i + 1][j] + 0.1 * A[i + 1][j + 1]; } } #pragma endscop //printf("Kernal computation complete !!\n"); } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array(ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. 
*/ kernel_conv2d(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
ej2.c
#include <stdio.h> #include <math.h> #include <float.h> #include <stdlib.h> #include <omp.h> #include "ctimer.h" main(int argc, char**argv) { ////// PRODUCTO MATRIZ-VECTOR x=A*b ////// // DECLARACION DE VARIABLES // // DECLARACION DE VARIABLES // double t1,t2,tucpu,tscpu; const long int M= 1048576; double TALLA = 8; double *b; b=malloc(M*sizeof(double)); double suma; srand(time(0)); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("Programa que calcula el Producto Matriz-Vector. \n"); printf("------- \n"); int i; // GENERACION DE DATOS // // for (i=0;i<M;i++){ b[i]=rand(); } // PRODUCTO MATRIZ-VECTOR SECUENCIAL // // // printf("Voy a empezar la suma secuencial. \n"); printf(" ------- \n"); double alfa; ctimer(&t1,&tucpu,&tscpu); alfa=0.0; for(i=0;i<M;i++){ alfa+=b[i]; } ctimer(&t2,&tucpu,&tscpu); printf("Suma = %f \n",alfa); printf(" ------- \n"); printf("Tiempo %f segundos \n",(float) (t2-t1)); printf(" ------- \n"); // PRODUCTO MATRIZ // PRODUCTO MATRIZ-VECTOR PARALELO / VECTOR PARALELO // printf("Empiezo la suma paralela\n"); printf(" ------- \n"); ctimer(&t1,&tucpu,&tscpu); int tb; int tid; tb=M/TALLA; omp_set_num_threads(TALLA); double sol; sol = 0.0; #pragma omp parallel for reduction(+:sol) private(i,tb) for (i=0;i<M;i++){ sol += b[i]; } ctimer(&t2,&tucpu,&tscpu); // SALIDA DE RESULTADOS // printf("Ha terminado la suma paralela\n"); printf(" ------- \n"); printf("Suma = %f \n",sol); printf(" ------- \n"); // Fin del calculo del Producto Matriz-Vector paralelo printf("Tiempo %f segundos \n",(float) (t2-t1)); printf("He acabado. \n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); }
#include <stdio.h> #include <math.h> #include <float.h> #include <stdlib.h> #include <omp.h> #include "ctimer.h" main(int argc, char**argv) { ////// PRODUCTO MATRIZ-VECTOR x=A*b ////// // DECLARACION DE VARIABLES // // DECLARACION DE VARIABLES // double t1,t2,tucpu,tscpu; const long int M= 1048576; double TALLA = 8; double *b; b=malloc(M*sizeof(double)); double suma; srand(time(0)); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("Programa que calcula el Producto Matriz-Vector. \n"); printf("------- \n"); int i; // GENERACION DE DATOS // // for (i=0;i<M;i++){ b[i]=rand(); } // PRODUCTO MATRIZ-VECTOR SECUENCIAL // // // printf("Voy a empezar la suma secuencial. \n"); printf(" ------- \n"); double alfa; ctimer(&t1,&tucpu,&tscpu); alfa=0.0; for(i=0;i<M;i++){ alfa+=b[i]; } ctimer(&t2,&tucpu,&tscpu); printf("Suma = %f \n",alfa); printf(" ------- \n"); printf("Tiempo %f segundos \n",(float) (t2-t1)); printf(" ------- \n"); // PRODUCTO MATRIZ // PRODUCTO MATRIZ-VECTOR PARALELO / VECTOR PARALELO // printf("Empiezo la suma paralela\n"); printf(" ------- \n"); ctimer(&t1,&tucpu,&tscpu); int tb; int tid; tb=M/TALLA; omp_set_num_threads(TALLA); double sol; sol = 0.0; for (i=0;i<M;i++){ sol += b[i]; } ctimer(&t2,&tucpu,&tscpu); // SALIDA DE RESULTADOS // printf("Ha terminado la suma paralela\n"); printf(" ------- \n"); printf("Suma = %f \n",sol); printf(" ------- \n"); // Fin del calculo del Producto Matriz-Vector paralelo printf("Tiempo %f segundos \n",(float) (t2-t1)); printf("He acabado. \n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); }
#include <stdio.h> #include <math.h> #include <float.h> #include <stdlib.h> #include <omp.h> #include "ctimer.h" main(int argc, char**argv) { ////// PRODUCTO MATRIZ-VECTOR x=A*b ////// // DECLARACION DE VARIABLES // // DECLARACION DE VARIABLES // double t1,t2,tucpu,tscpu; const long int M= 1048576; double TALLA = 8; double *b; b=malloc(M*sizeof(double)); double suma; srand(time(0)); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("Programa que calcula el Producto Matriz-Vector. \n"); printf("------- \n"); int i; // GENERACION DE DATOS // // for (i=0;i<M;i++){ b[i]=rand(); } // PRODUCTO MATRIZ-VECTOR SECUENCIAL // // // printf("Voy a empezar la suma secuencial. \n"); printf(" ------- \n"); double alfa; ctimer(&t1,&tucpu,&tscpu); alfa=0.0; for(i=0;i<M;i++){ alfa+=b[i]; } ctimer(&t2,&tucpu,&tscpu); printf("Suma = %f \n",alfa); printf(" ------- \n"); printf("Tiempo %f segundos \n",(float) (t2-t1)); printf(" ------- \n"); // PRODUCTO MATRIZ // PRODUCTO MATRIZ-VECTOR PARALELO / VECTOR PARALELO // printf("Empiezo la suma paralela\n"); printf(" ------- \n"); ctimer(&t1,&tucpu,&tscpu); int tb; int tid; tb=M/TALLA; omp_set_num_threads(TALLA); double sol; sol = 0.0; #pragma omp parallel for reduction(+:sol) private(i,tb) for (i=0;i<M;i++){ sol += b[i]; } ctimer(&t2,&tucpu,&tscpu); // SALIDA DE RESULTADOS // printf("Ha terminado la suma paralela\n"); printf(" ------- \n"); printf("Suma = %f \n",sol); printf(" ------- \n"); // Fin del calculo del Producto Matriz-Vector paralelo printf("Tiempo %f segundos \n",(float) (t2-t1)); printf("He acabado. \n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); }
mlp_mnist_bf16_amx_fused_trans_fused_sgd_numa.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas, Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <libxsmm_sync.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /* include c-based dnn library */ #include "../common/dnn_common.h" #include "../common/mnist.h" #define TEST_ACCURACY #define OVERWRITE_DOUTPUT_BWDUPD /*#define FUSE_WT_TRANS_SGD*/ /*#define FUSE_ACT_TRANS_FWD*/ /*#define FUSE_DACT_TRANS_BWD*/ #define PRIVATE_WT_TRANS #define PRIVATE_ACT_TRANS #define PRIVATE_DACT_TRANS #define FUSE_SGD_IN_BWD #define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16)) #define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)LIBXSMM_INTRINSICS_MM512_CVT_FP32_BF16((B))) static int threads_per_numa = 0; LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < (int)size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) { int i; zero_buf_bf16(buf, size); for (i = 0; i < (int)size; ++i) { libxsmm_bfloat16_hp tmp; tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? 
libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); buf[i] = tmp.i[1]; } } LIBXSMM_INLINE void init_buf_bf16_numa_aware(int threads, int ltid, int ft_mode, libxsmm_bfloat16* buf, size_t size, int initPos, int initOne) { int chunksize, chunks; int my_numa_node = ltid/threads_per_numa; int n_numa_nodes = threads/threads_per_numa; int l = 0; if (ft_mode == 0) { /* Mode 0 : Block cyclic assignment to NUMA nodes */ int bufsize = size * 2; chunksize = 4096; chunks = (bufsize + chunksize - 1)/chunksize; for (l = 0; l < chunks; l++) { int _chunksize = (l < chunks - 1) ? chunksize : bufsize - (chunks-1) * chunksize; if ( l % n_numa_nodes == my_numa_node) { my_init_buf_bf16((libxsmm_bfloat16*) buf+l*(chunksize/2), _chunksize/2, 0, 0 ); } } } else { /* Mode 1: Block assignement to NUMA nodes */ chunks = n_numa_nodes; chunksize = (size + chunks - 1) /chunks; for (l = 0; l < chunks; l++) { int _chunksize = (l < chunks - 1) ? chunksize : size - (chunks-1) * chunksize; if ( l == my_numa_node) { my_init_buf_bf16((libxsmm_bfloat16*) buf+l*chunksize, _chunksize, 0, 0 ); } } } } void init_buffer_block_numa(libxsmm_bfloat16* buf, size_t size) { int nThreads = omp_get_max_threads(); #if defined(_OPENMP) # pragma omp parallel #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif if (tid % threads_per_numa == 0) { init_buf_bf16_numa_aware(nThreads, tid, 1, buf, size, 0, 0); } } } void init_buffer_block_cyclic_numa(libxsmm_bfloat16* buf, size_t size) { int nThreads = omp_get_max_threads(); #if defined(_OPENMP) # pragma omp parallel #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif if (tid % threads_per_numa == 0) { init_buf_bf16_numa_aware(nThreads, tid, 0, buf, size, 0, 0); } } } #if 0 LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk) { int k1, k2, c1, c2; int kBlocks = K/bk; int cBlocks = C/bc; LIBXSMM_VLA_DECL(4, float, real_src, 
src, cBlocks, bc, bk); LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2); for (k1 = 0; k1 < kBlocks; k1++) { for (c1 = 0; c1 < cBlocks; c1++) { for (c2 = 0; c2 < bc; c2++) { for (k2 = 0; k2 < bk; k2++) { LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) = LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk); } } } } } #endif typedef enum my_eltwise_fuse { MY_ELTWISE_FUSE_NONE = 0, MY_ELTWISE_FUSE_BIAS = 1, MY_ELTWISE_FUSE_RELU = 2, MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU } my_eltwise_fuse; typedef enum my_pass { MY_PASS_FWD = 1, MY_PASS_BWD_D = 2, MY_PASS_BWD_W = 4, MY_PASS_BWD = 6 } my_pass; typedef struct my_opt_config { libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; libxsmm_blasint opt_2d_blocking; libxsmm_blasint opt_col_teams; libxsmm_blasint opt_row_teams; float lr; size_t scratch_size; libxsmm_barrier* barrier; } my_opt_config; typedef struct my_smax_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint threads; size_t scratch_size; libxsmm_barrier* barrier; } my_smax_fwd_config; typedef struct my_smax_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint threads; size_t scratch_size; float loss_weight; libxsmm_barrier* barrier; my_eltwise_fuse fuse_type; } my_smax_bwd_config; typedef struct my_vnni_reformat_config { libxsmm_blasint C; libxsmm_blasint N; libxsmm_blasint bc; libxsmm_blasint bn; libxsmm_blasint threads; libxsmm_barrier* barrier; my_eltwise_fuse fuse_type; libxsmm_meltwfunction_unary norm_to_vnni_kernel; libxsmm_meltwfunction_unary fused_relu_kernel; } my_vnni_reformat_config; typedef struct my_fc_fwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; my_eltwise_fuse fuse_type; libxsmm_blasint fwd_bf; 
libxsmm_blasint fwd_2d_blocking; libxsmm_blasint fwd_col_teams; libxsmm_blasint fwd_row_teams; libxsmm_blasint fwd_M_hyperpartitions; libxsmm_blasint fwd_N_hyperpartitions; size_t scratch_size; libxsmm_barrier* barrier; libxsmm_bsmmfunction fwd_config_kernel; libxsmm_bsmmfunction tilerelease_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_fwd; libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2; libxsmm_bmmfunction_reducebatch_strd gemm_fwd3; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8; libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary fwd_cvtfp32bf16_relu_kernel; libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary fwd_zero_kernel; libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel; libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel; } my_fc_fwd_config; typedef struct my_fc_bwd_config { libxsmm_blasint N; libxsmm_blasint C; libxsmm_blasint K; libxsmm_blasint bn; libxsmm_blasint bc; libxsmm_blasint bk; libxsmm_blasint threads; my_eltwise_fuse fuse_type; libxsmm_blasint bwd_bf; libxsmm_blasint bwd_2d_blocking; libxsmm_blasint bwd_col_teams; libxsmm_blasint bwd_row_teams; libxsmm_blasint bwd_M_hyperpartitions; libxsmm_blasint bwd_N_hyperpartitions; libxsmm_blasint upd_bf; libxsmm_blasint upd_2d_blocking; libxsmm_blasint upd_col_teams; libxsmm_blasint upd_row_teams; libxsmm_blasint upd_M_hyperpartitions; libxsmm_blasint upd_N_hyperpartitions; libxsmm_blasint ifm_subtasks; libxsmm_blasint ofm_subtasks; libxsmm_blasint fuse_relu_bwd; size_t bwd_private_tr_wt_scratch_mark; size_t upd_private_tr_act_scratch_mark; size_t upd_private_tr_dact_scratch_mark; size_t scratch_size; size_t doutput_scratch_mark; libxsmm_barrier* barrier; libxsmm_bsmmfunction 
bwd_config_kernel; libxsmm_bsmmfunction upd_config_kernel; libxsmm_bsmmfunction tilerelease_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_bwd; libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2; libxsmm_bmmfunction_reducebatch_strd gemm_bwd3; libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_bwd5; libxsmm_meltwfunction_unary bwd_fused_relu_kernel; libxsmm_bsmmfunction_reducebatch_strd gemm_upd; libxsmm_bsmmfunction_reducebatch_strd gemm_upd2; libxsmm_bmmfunction_reducebatch_strd gemm_upd3; libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary upd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary bwd_relu_kernel; libxsmm_meltwfunction_unary bwd_zero_kernel; libxsmm_meltwfunction_unary upd_zero_kernel; libxsmm_meltwfunction_unary delbias_reduce_kernel; libxsmm_meltwfunction_unary vnni_to_vnniT_kernel; libxsmm_meltwfunction_unary norm_to_normT_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel_wt; float lr; } my_fc_bwd_config; my_vnni_reformat_config setup_my_vnni_reformat(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_vnni_reformat_config res; libxsmm_blasint ld = bc; res.N = N; res.C = C; res.bn = bn; res.bc = bc; res.threads = threads; res.fuse_type = fuse_type; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); res.fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ld, &ld, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.fused_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP fused_relu_kernel failed. 
Bailing...!\n"); exit(-1); } return res; } my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_fwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero = bk*bn; libxsmm_blasint ld_upconvert = K; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; libxsmm_meltw_flags fusion_flags; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); libxsmm_blasint unroll_hint; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 1; if (threads == 16) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 8; } else if (threads == 14) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 7; } else if (threads == 56) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 1; res.fwd_row_teams = 14; res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 4; } else { res.fwd_bf = 1; res.fwd_2d_blocking = 0; res.fwd_col_teams = 1; res.fwd_row_teams = 1; } #if 0 res.fwd_bf = atoi(getenv("FWD_BF")); res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING")); res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS")); res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = 
(res.C/res.bc)/res.fwd_bf; res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if ( res.fwd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n"); exit(-1); } res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if ( res.gemm_fwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n"); exit(-1); } res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_fwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_fwd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C; res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd4 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. 
Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C; res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd5 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C; res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd6 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C; res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd7 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. 
Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C; res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if ( res.gemm_fwd8 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.fwd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU); if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID); if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if ( res.tilerelease_kernel == NULL ) { fprintf( stderr, "JIT for TPP tilerelease_kernel failed. 
Bailing...!\n"); exit(-1); } res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.fwd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY ); if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.fwd_copy_bf16fp32_kernel == NULL ) { fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K)); return res; } my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type, float lr) { my_fc_bwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero_bwd = bc*bn; libxsmm_blasint ld_zero_upd = bk; libxsmm_blasint delbias_K = K; libxsmm_blasint delbias_N = N; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; libxsmm_blasint updM; libxsmm_blasint updN; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); libxsmm_blasint unroll_hint; size_t size_bwd_scratch; size_t size_upd_scratch; libxsmm_blasint bbk; libxsmm_blasint bbc; libxsmm_blasint ldaT = bc; libxsmm_blasint ldb_orig= bc; libxsmm_meltw_flags fusion_flags_bwd; libxsmm_meltw_operation bwd_fused_op; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; res.fuse_relu_bwd = 0; res.lr = lr; /* setup parallelization strategy */ res.bwd_M_hyperpartitions = 1; res.upd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 1; res.upd_N_hyperpartitions = 1; if (threads == 16) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 8; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 8; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 14) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 7; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 7; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 56) { res.bwd_bf = 1; 
res.bwd_2d_blocking = 1; res.bwd_col_teams = 1; res.bwd_row_teams = 14; res.bwd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 4; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 1; res.upd_row_teams = 14; res.upd_M_hyperpartitions = 1; res.upd_N_hyperpartitions = 4; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else { res.bwd_bf = 1; res.bwd_2d_blocking = 0; res.bwd_col_teams = 1; res.bwd_row_teams = 1; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_col_teams = 1; res.upd_row_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks; bbc = (res.upd_2d_blocking == 1) ? bc : bc/res.ifm_subtasks; #if 0 res.bwd_bf = atoi(getenv("BWD_BF")); res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING")); res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS")); res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS")); res.upd_bf = atoi(getenv("UPD_BF")); res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING")); res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS")); res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS")); res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS")); res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS")); #endif if (res.bwd_2d_blocking != 1) { printf("Requested private wt transposes, but for the current # of threads the bwd decomposition is not 2D. Will perform upfront/shared wt transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private act transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared act transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private dact transposes, but for the current # of threads the upd decomposition is not 2D. 
Will perform upfront/shared dact transposes...\n"); } /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ /* BWD GEMM */ l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.K/res.bk)/res.bwd_bf; res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL); if ( res.gemm_bwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n"); exit(-1); } res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_bwd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n"); exit(-1); } res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL); if ( res.bwd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... 
*/ res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.bwd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.bwd_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.bwd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernel */ res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT); if ( res.vnni_to_vnniT_kernel == NULL ) { fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n"); exit(-1); } bwd_fused_op = LIBXSMM_MELTW_OPERATION_COLBIAS_ACT; fusion_flags_bwd = LIBXSMM_MELTW_FLAG_ACT_RELU_BWD_OVERWRITE_C; res.gemm_bwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL, bwd_fused_op, LIBXSMM_DATATYPE_BF16, fusion_flags_bwd, 0, 0, 0, 0); if ( res.gemm_bwd5 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd5 failed. 
Bailing...!\n"); exit(-1); } if (((fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) && (res.upd_2d_blocking == 1)) { res.fuse_relu_bwd = 1; } res.bwd_fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( res.bwd_fused_relu_kernel == NULL ) { fprintf( stderr, "JIT for TPP bwd_fused_relu_kernel failed. Bailing...!\n"); exit(-1); } /* UPD GEMM */ lda = res.bk; ldb = res.bn; ldc = res.bk; updM = res.bk/res.ofm_subtasks; updN = res.bc/res.ifm_subtasks; l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ); unroll_hint = (res.N/res.bn)/res.upd_bf; res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if ( res.gemm_upd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n"); exit(-1); } res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_upd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n"); exit(-1); } l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C; res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if ( res.gemm_upd3 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. 
Bailing...!\n"); exit(-1); } res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if ( res.upd_config_kernel == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if ( res.tilerelease_kernel == NULL ) { fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( res.upd_cvtfp32bf16_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( res.upd_zero_kernel == NULL ) { fprintf( stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if ( res.delbias_reduce_kernel == NULL ) { fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernels */ res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if ( res.norm_to_vnni_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. 
Bailing...!\n"); exit(-1); }
/* JIT the NORM->VNNI reformat TPP applied to the (bbk x bbc) f32->bf16-converted weight-gradient tile */
res.norm_to_vnni_kernel_wt = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
if ( res.norm_to_vnni_kernel_wt == NULL ) { fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); }
/* JIT the NORM->NORM-transpose TPP used to transpose (bn x bc) activation blocks for the update pass */
res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
if ( res.norm_to_normT_kernel == NULL ) { fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n"); exit(-1); }
/* init scratch: the bwd and upd passes reuse the same scratch buffer, so its size is
 * the max of the two requirements; byte offsets ("marks") into that buffer are recorded
 * for the per-thread private transpose areas consumed by my_fc_bwd_exec. */
size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
res.bwd_private_tr_wt_scratch_mark = size_bwd_scratch;
size_bwd_scratch += res.threads * res.bc * res.K * sizeof(libxsmm_bfloat16);
size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
/* doutput copy (used e.g. when ReLU backward rewrites the gradient) lives past the shared region */
res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) ;
res.upd_private_tr_dact_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bk * res.N * sizeof(libxsmm_bfloat16);
res.upd_private_tr_act_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bc * res.N * (((res.C/res.bc)+res.upd_col_teams-1)/res.upd_col_teams) * sizeof(libxsmm_bfloat16);
return res;
}

/* Builds the optimizer (SGD) handle: records problem geometry (C x K weights, bc x bk
 * blocking), the learning rate, and a thread barrier. The 2D thread-team decomposition
 * is hard-wired for 16 and 14 threads; any other count falls back to a 1D (no-team)
 * work partition. No scratch is required (scratch_size = 0). */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) {
  my_opt_config res;
  /* setting up some handle values */
  res.C = C;
  res.K = K;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  if (threads == 16) {
    res.opt_2d_blocking = 1;
    res.opt_col_teams = 2;
    res.opt_row_teams = 8;
  } else if (threads == 14) {
    res.opt_2d_blocking = 1;
    res.opt_col_teams = 2;
    res.opt_row_teams = 7;
  } else {
    /* unknown thread count: disable 2D team blocking */
    res.opt_2d_blocking = 0;
    res.opt_col_teams = 1;
    res.opt_row_teams = 1;
  }
  res.lr = lr;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = 0;
  return res;
}

/* Builds the softmax forward handle for an (N x C) activation with (bn x bc) blocking.
 * Scratch holds two f32 copies of the full activation (input staging + result). */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) {
  my_smax_fwd_config res;
  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);;  /* NOTE(review): stray second ';' (harmless empty statement) */
  return res;
}

/* Builds the softmax backward handle; identical geometry/scratch to the forward
 * handle plus the loss scaling factor applied during the backward pass. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config res;
  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  res.loss_weight = loss_weight;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = (sizeof(float)*res.C*res.N*2);
  return res;
}

/* Forward pass of the fully-connected layer: out = in * wt (+ bias, + ReLU per
 * cfg.fuse_type), computed with the batch-reduce GEMM and eltwise TPPs JITed into cfg.
 * Weights are in VNNI layout (lpb = 2 bf16 rows packed); activations are blocked
 * (bn x bc / bn x bk). Each thread in [start_tid, start_tid + cfg.threads) processes its
 * share of the (nBlocksMB x nBlocksOFm) block grid, either via a flat 1D chunking or a
 * 2D team decomposition (cfg.fwd_2d_blocking). scratch provides the f32 accumulator /
 * bias staging area. Synchronization is via cfg.barrier; the function returns after all
 * threads reach the final barrier. */
void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch ) {
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = cfg.bc/lpb;
  /* const libxsmm_blasint bc = cfg.bc;*/
  libxsmm_blasint
use_2d_blocking = cfg.fwd_2d_blocking;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
/* compute chunk size (ceiling division of work over threads) */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end (clamped to 'work' for trailing threads) */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
/* VLA views over the blocked tensors; filter is 5D because of the VNNI (lpb) packing */
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
/* f32 accumulator view over scratch, used when BF > 1 (split reduction over IFm blocks) */
LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk);
libxsmm_meltw_gemm_param gemm_eltwise_params;
/* per-thread f32 staging of the bias row (only allocated a view when bias fusion is on) */
float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL;
LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk);
/* ReLU mask is one bit per output element, hence the bk/32 innermost extent */
LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);
libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltw_unary_param eltwise_params_act;
libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
libxsmm_meltw_unary_param eltwise_params;
libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
libxsmm_meltw_unary_param copy_params;
unsigned long long blocks = nBlocksIFm;
libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
/* select the fused zero-beta BRGEMM variant matching the requested bias/ReLU fusion */
if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
} else {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL;
}
/* BF > 1 splits the IFm reduction into BF chunks of CB_BLOCKS blocks each,
 * accumulating in the f32 scratch tensor between chunks */
BF = cfg.fwd_bf;
CB_BLOCKS = nBlocksIFm/BF;
blocks = CB_BLOCKS;
if (use_2d_blocking == 1) {
  /* map ltid onto a (col_teams x row_teams) grid inside its hyperpartition and derive
   * this thread's [my_M_start,my_M_end) x [my_N_start,my_N_end) block rectangle */
  int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksMB, hyperteam_id;
  col_teams = cfg.fwd_col_teams;
  row_teams = cfg.fwd_row_teams;
  hyperteam_id = ltid/(col_teams*row_teams);
  _nBlocksOFm = nBlocksOFm/cfg.fwd_M_hyperpartitions;
  _nBlocksMB = nBlocksMB/cfg.fwd_N_hyperpartitions;
  _ltid = ltid % (col_teams * row_teams);
  M_hyperpartition_id = hyperteam_id % cfg.fwd_M_hyperpartitions;
  N_hyperpartition_id = hyperteam_id / cfg.fwd_M_hyperpartitions;
  my_row_id = _ltid % row_teams;
  my_col_id = _ltid / row_teams;
  N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams;
  M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams;
  my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB);
  my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB);
  my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm);
  my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm);
}
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
/* configure the (AMX) tile state for the forward GEMMs -- TODO confirm semantics of config kernel */
cfg.fwd_config_kernel(NULL, NULL, NULL);
if (use_2d_blocking == 1) {
  if (BF > 1) {
    /* split reduction: accumulate each IFm chunk into f32 scratch, convert to bf16 at the last chunk */
    for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          /* first chunk: seed the f32 accumulator with the broadcast bias, or zero it */
          if ( ifm1 == 0 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
              copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
              cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
            } else {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_zero_kernel(&copy_params);
            }
          }
          cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          /* last chunk: downconvert f32 accumulator to bf16 output (with fused ReLU + bitmask if requested) */
          if ( ifm1 == BF-1 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
              eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
              eltwise_kernel_act(&eltwise_params_act);
            } else {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      }
    }
  } else {
    /* single-shot reduction: stage bias once in f32 per thread, then run the fused BRGEMM */
    if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
      copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
      copy_params.out.primary = fp32_bias_scratch;
      cfg.fwd_copy_bf16fp32_kernel(&copy_params);
    }
    for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
      for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
        if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
          }
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
          }
          bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
        } else {
          cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
        }
      }
    }
  }
} else {
  /* 1D work partition over the flat (mb1, ofm1) block index range [thr_begin, thr_end) */
  if (BF > 1) {
    for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
      for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 = mb1ofm1/nBlocksMB;
        /* Initialize libxsmm_blasintermediate f32 tensor */
        if ( ifm1 == 0 ) {
          if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
            copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
            copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
            cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
          } else {
            copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            cfg.fwd_zero_kernel(&copy_params);
          }
        }
        cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
        /* last IFm chunk: downconvert accumulator to bf16 output (fused ReLU if requested) */
        if ( ifm1 == BF-1 ) {
          if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
            eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
            eltwise_kernel_act(&eltwise_params_act);
          } else {
            eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_kernel(&eltwise_params);
          }
        }
      }
    }
  } else {
    if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
      copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
      copy_params.out.primary = fp32_bias_scratch;
      cfg.fwd_copy_bf16fp32_kernel(&copy_params);
    }
    for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
      mb1 = mb1ofm1%nBlocksMB;
      ofm1 = mb1ofm1/nBlocksMB;
      if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
          gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
        }
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
          gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
        }
        bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
      } else {
        cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
      }
    }
  }
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* Backward/update pass of the fully-connected layer (definition continues past this
 * chunk): computes din = dout * wt^T and/or dwt = in^T * dout per 'pass', with optional
 * fused ReLU-backward and bias-gradient reduction per cfg.fuse_type. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr, const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch, float *fil_master ) {
  /* size variables, all const */
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2;
  const libxsmm_blasint bc_lp = bc/lpb;
  const libxsmm_blasint bk_lp = bk/lpb;
  const libxsmm_blasint bn_lp = bn/lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
  libxsmm_blasint performed_doutput_transpose = 0;
  libxsmm_meltw_unary_param trans_param;
  unsigned int i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint
eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work; const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint dbias_work = nBlocksOFm; /* compute chunk size */ const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work; const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work; LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk); libxsmm_blasint ext_blocks = (cfg.fuse_relu_bwd == 1) ? nBlocksIFm : nBlocksOFm; libxsmm_blasint int_blocks = (cfg.fuse_relu_bwd == 1) ? cfg.bc : cfg.bk; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, ext_blocks, cfg.bn, int_blocks/32); libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr; libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? 
(libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk); libxsmm_meltw_unary_param relu_params; libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel; LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb); libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel; libxsmm_meltw_unary_param eltwise_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; libxsmm_meltw_gemm_param eltwise_params_bwd; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); cfg.bwd_config_kernel(NULL, NULL, NULL); if (cfg.upd_2d_blocking == 0) { /* Apply to doutput potential fusions */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1/nBlocksOFm; ofm1 = mb1ofm1%nBlocksOFm; relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32); relu_kernel(&relu_params); /* If in UPD pass, also perform transpose of doutput */ if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { performed_doutput_transpose = 1; } 
libxsmm_barrier_wait(cfg.barrier, ltid); } /* Accumulation of bias happens in f32 */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } } if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){ libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm; /* compute chunk size */ const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work; const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? 
((ltid + 1) * transpose_chunksize) : transpose_work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb); float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2; LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm/BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksIFm, _nBlocksMB, hyperteam_id; col_teams = cfg.bwd_col_teams; row_teams = cfg.bwd_row_teams; hyperteam_id = ltid/(col_teams*row_teams); _nBlocksIFm = nBlocksIFm/cfg.bwd_M_hyperpartitions; _nBlocksMB = nBlocksMB/cfg.bwd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.bwd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.bwd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksMB + col_teams-1)/col_teams; M_tasks_per_thread = (_nBlocksIFm + row_teams-1)/row_teams; my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksMB); my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksMB); my_M_start = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksIFm); my_M_end = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_row_id+1) * 
M_tasks_per_thread, _nBlocksIFm); } /* transpose weight */ if (cfg.bwd_2d_blocking == 0) { for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / nBlocksIFm; ifm1 = ifm1ofm1 % nBlocksIFm; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } /* wait for transpose to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_filter_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.bwd_private_tr_wt_scratch_mark)) + ltid * bc * cfg.K, bk_lp, bc, lpb); if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = ofm1*KB_BLOCKS; ofm2 < (ofm1+1)*KB_BLOCKS; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { /* Initialize libxsmm_blasintermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm1*KB_BLOCKS, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); 
eltwise_kernel(&eltwise_params); if (cfg.fuse_relu_bwd > 0) { relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.bwd_fused_relu_kernel(&relu_params); } } } } } } else { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = 0; ofm2 < nBlocksOFm; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if (cfg.fuse_relu_bwd > 0) { eltwise_params_bwd.relu_bitmask_bwd = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.gemm_bwd5( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks, &eltwise_params_bwd); } else { cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } } else { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; /* Initialize libxsmm_blasintermediate f32 tensor */ if ( ofm1 == 0 ) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 
ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* downconvert libxsmm_blasintermediate f32 tensor to bf 16 and store to final C */ if ( ofm1 == BF-1 ) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); } } } } else { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (cfg.upd_2d_blocking == 1) { /* Accumulation of bias happens in f32 */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } } } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? 
bc : bc/ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, mb3 = 0, bfn = 0, mb1ifm1 = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB/BF; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb); /* Set up tensors for transposing/scratch before vnni reformatting dfilter */ libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K); float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C); #ifndef BYPASS_SGD libxsmm_bfloat16 *dfilter_scratch = (libxsmm_bfloat16*) ((float*)dfilter_f32_ptr + cfg.C * cfg.K) + ltid * bc * bk; #endif LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk); #ifndef BYPASS_SGD LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dfilter_block, (libxsmm_bfloat16*)dfilter_scratch, bk); 
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float*)fil_master, nBlocksIFm, bc, bk); #endif const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm; const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1); const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm; const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1); const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work; const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? 
((ltid + 1) * tr_inp_chunksize) : tr_inp_work; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksIFm, hyperteam_id; col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; hyperteam_id = ltid/(col_teams*row_teams); _nBlocksOFm = nBlocksOFm/cfg.upd_M_hyperpartitions; _nBlocksIFm = nBlocksIFm/cfg.upd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.upd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.upd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksIFm + col_teams-1)/col_teams; M_tasks_per_thread = (_nBlocksOFm + row_teams-1)/row_teams; my_N_start = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( my_col_id * N_tasks_per_thread, _nBlocksIFm); my_N_end = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, _nBlocksIFm); my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( my_row_id * M_tasks_per_thread, _nBlocksOFm); my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, _nBlocksOFm); } if (cfg.upd_2d_blocking == 0) { /* Required upfront tranposes */ for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if (cfg.upd_2d_blocking == 0) { if (performed_doutput_transpose == 0) { for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); 
cfg.norm_to_vnni_kernel(&trans_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_input_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.upd_private_tr_act_scratch_mark)) + ltid * bc * cfg.N * N_tasks_per_thread, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_doutput_tr, ((libxsmm_bfloat16*)((char*)scratch + cfg.upd_private_tr_dact_scratch_mark)) + ltid * bk * cfg.N, bn_lp, bk, lpb); ifm2 = 0; ofm2 = 0; if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if ((bc % 16 == 0) && (bk % 16 == 0)) { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { 
__m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } } else { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk), &blocks); trans_param.in.primary = &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); cfg.norm_to_vnni_kernel_wt(&trans_param); { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } } } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = bfn*blocks; mb3 < (bfn+1)*blocks; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = bfn*blocks; mb3 < (bfn+1)*blocks; mb3++) { trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 
0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, bfn*blocks, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1-my_N_start, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF-1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); cfg.norm_to_vnni_kernel_wt(&trans_param); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } #endif } } } } } } else { if (BF == 1) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % 
Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps( cfg.lr ); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for ( i = 0; i < bc*bk; i+=16 ) { __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) ); _mm512_store_fil( wt_bf16+i, newfilter ); _mm512_storeu_ps( wt_fp32+i, newfilter ); } } #endif } } else { for (bfn = 0; bfn < BF; bfn++) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF-1) { 
/* Tail of the filter-gradient (upd) pass of the enclosing bwd function:
   downconvert the accumulated fp32 gradient block to bf16 and repack it to
   VNNI layout, then apply the fused SGD step. */
LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64);
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
eltwise_params.out.primary = tmp_buf;
trans_param.in.primary = tmp_buf;
trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
eltwise_kernel2(&eltwise_params);
cfg.norm_to_vnni_kernel_wt(&trans_param);
#ifndef BYPASS_SGD
/* Fused SGD: fp32 master weights -= lr * gradient; the new value is stored
   both to the fp32 master copy and to the bf16 working copy. */
{
  __m512 vlr = _mm512_set1_ps( cfg.lr );
  libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
  libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
  float *wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
  for ( i = 0; i < bc*bk; i+=16 ) {
    __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) );
    _mm512_store_fil( wt_bf16+i, newfilter );
    _mm512_storeu_ps( wt_fp32+i, newfilter );
  }
}
#endif
}
}
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
}

/* my_opt_exec: plain-SGD optimizer step for one fully-connected layer.
 * Subtracts cfg.lr * delwt from the fp32 master weights (master_wt_ptr) and
 * writes the refreshed bf16 working weights (wt_ptr). Work is partitioned
 * across cfg.threads (either a flat element split or a 2D block split over
 * the filter blocks) and synchronized with a libxsmm barrier.
 * NOTE(review): the scratch argument is unused in this function. */
void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr, const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) {
  /* loop counters */
  libxsmm_blasint i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2; /* VNNI pack factor: 2 bf16 values per packed pair */
  const libxsmm_blasint bc_lp = bc/lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  /* number of tasks that could run in parallel for the filters */
  const libxsmm_blasint work = cfg.C * cfg.K;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
#if defined(__AVX512BW__)
  __m512 vlr = _mm512_set1_ps( cfg.lr );
  if (cfg.opt_2d_blocking == 1) {
    /* 2D team decomposition over (OFm blocks x IFm blocks). */
    libxsmm_blasint ofm1, ifm1;
    libxsmm_blasint col_teams = cfg.opt_col_teams;
    libxsmm_blasint row_teams = cfg.opt_row_teams;
    libxsmm_blasint my_row_id = ltid % row_teams;
    libxsmm_blasint my_col_id = ltid / row_teams;
    libxsmm_blasint N_tasks_per_thread = (nBlocksIFm + col_teams-1)/col_teams;
    libxsmm_blasint M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams;
    libxsmm_blasint my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksIFm);
    libxsmm_blasint my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksIFm);
    libxsmm_blasint my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm);
    libxsmm_blasint my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)delwt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(4, float, master_filter, (float*)master_wt_ptr, nBlocksIFm, bc, bk);
    libxsmm_bfloat16 *wt_bf16, *dwt_bf16;
    float *wt_fp32;
    for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
      for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
        dwt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
        wt_bf16 = (libxsmm_bfloat16*) &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
        wt_fp32 = (float*) &LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
        for ( i = 0; i < bc*bk; i+=16 ) {
          /* wt_fp32 -= lr*dwt; store result as fp32 master and bf16 working copy */
          __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( wt_fp32+i ), _mm512_mul_ps( vlr, _mm512_load_fil( (libxsmm_bfloat16*)dwt_bf16 + i ) ) );
          _mm512_store_fil( wt_bf16+i, newfilter );
          _mm512_storeu_ps( wt_fp32+i, newfilter );
        }
      }
    }
  } else {
    /* 1D flat partition: vectorized main loop plus scalar remainder. */
    libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */
    for ( i = thr_begin; i < thr_begin+iv; i+=16 ) {
      __m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) );
      _mm512_store_fil( wt_ptr+i, newfilter );
      _mm512_storeu_ps( master_wt_ptr+i, newfilter );
    }
    for ( i = thr_begin+iv; i < thr_end; ++i ) {
      /* scalar tail: widen bf16 to fp32 by zero-filling the low 16 bits */
      libxsmm_bfloat16_hp t1, t2;
      t1.i[0] =0;
      t1.i[1] = delwt_ptr[i];
      master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
      t2.f = master_wt_ptr[i];
      wt_ptr[i] = t2.i[1]; /* truncate fp32 -> bf16 (keep high 16 bits) */
    }
  }
#else
  /* portable scalar fallback when AVX512BW is not available */
  for ( i = thr_begin; i < thr_end; ++i ) {
    libxsmm_bfloat16_hp t1, t2;
    t1.i[0] =0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1];
  }
#endif
  libxsmm_barrier_wait( cfg.barrier, ltid );
}

/* my_smax_fwd_exec: softmax forward + average cross-entropy loss.
 * Upconverts the bf16 input activations to fp32 scratch, computes a
 * max-stabilized softmax per sample, accumulates -log p[label] into *loss
 * (single-threaded on ltid 0), and downconverts the result back to bf16. */
void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ?
((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16* poutput_bf16 = out_act_ptr; const libxsmm_bfloat16* pinput_bf16 = in_act_ptr; float* poutput_fp32 = (float*)scratch; float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { float max = FLT_MIN; float sum_of_exp = 0.0f; img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) { max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } } /* sum exp over outputs */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) ); 
sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ); } } /* scale output */ sum_of_exp = 1.0f/sum_of_exp; for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp; } } } libxsmm_barrier_wait( cfg.barrier, ltid ); /* calculate loss single threaded */ if ( ltid == 0 ) { (*loss) = 0.0f; for ( img1 = 0; img1 < Bn; ++img1 ) { for ( img2 = 0; img2 <bn; ++img2 ) { libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ); libxsmm_blasint ifm1b = ifm/bc; libxsmm_blasint ifm2b = ifm%bc; float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss += LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_vnni_reformat_exec( my_vnni_reformat_config cfg, libxsmm_bfloat16* delin_act_ptr, libxsmm_bfloat16* tr_delin_act_ptr, unsigned char* relu_ptr, int start_tid, int my_tid ) { /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; libxsmm_blasint lpb = 2; const libxsmm_blasint bn_lp = bn/lpb; libxsmm_blasint mb1ifm1, mb1, ifm1; libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_param relu_params; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? 
(__mmask32*)relu_ptr : NULL, nBlocksIFm, cfg.bn, cfg.bc/32); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, tr_dinput, (libxsmm_bfloat16* )tr_delin_act_ptr, nBlocksMB, bn_lp, bc, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )delin_act_ptr, nBlocksIFm, bn, bc); /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; LIBXSMM_UNUSED( trans_param ); LIBXSMM_UNUSED( tr_dinput_ ); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc/32); cfg.fused_relu_kernel(&relu_params); } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel 
for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16* poutput_bf16 = out_act_ptr; libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr; float* poutput_fp32 = (float*)scratch; float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*Bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 
1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait( cfg.barrier, ltid ); for ( i = nc_thr_begin; i < nc_thr_end; ++i ) { libxsmm_bfloat16_hp in; in.f = pdinput_fp32[i]; pdinput_bf16[i] = in.i[1]; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void init_master_weights( my_opt_config cfg, float* master_wt_ptr, size_t size) { #if 0 if (0/* && cfg.upd_N_hyperpartitions != 1 */) { /*TODO: add hyperpartitions (?)*/ /* Spread out weights in a blocked fasion since we partition the MODEL dimenstion */ init_buffer_block_numa((libxsmm_bfloat16*) master_wt_ptr, size/2); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa((libxsmm_bfloat16*) master_wt_ptr, size/2); } #endif } void init_weights( my_fc_fwd_config cfg, libxsmm_bfloat16* wt_ptr, size_t size) { if (cfg.fwd_M_hyperpartitions != 1) { /* Spread out weights in a blocked fasion since we partition the MODEL dimenstion */ init_buffer_block_numa(wt_ptr, size); } else { /* Init weights in a block fashion */ init_buffer_block_cyclic_numa(wt_ptr, size); } } void init_dweights( my_fc_bwd_config cfg, libxsmm_bfloat16* dwt_ptr, size_t size) { if (cfg.upd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(dwt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(dwt_ptr, size); } } void init_acts( my_fc_fwd_config cfg, libxsmm_bfloat16* act_ptr, size_t size) { if (cfg.fwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(act_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(act_ptr, size); } } void init_delacts( my_fc_bwd_config cfg, libxsmm_bfloat16* delact_ptr, size_t size) { if (cfg.bwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(delact_ptr, size); } 
else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(delact_ptr, size); } } int main(int argc, char* argv[]) { libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config* my_fc_fwd; my_fc_bwd_config* my_fc_bwd; my_opt_config* my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; my_vnni_reformat_config my_vnni_reformat; void* scratch = NULL; size_t scratch_size = 0; /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.1f; float loss_weight = 1.0f; float loss = 0.0; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); char* env_threads_per_numa; if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 7; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channles buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* Read env variables */ env_threads_per_numa = getenv("THREADS_PER_NUMA"); if ( 0 == env_threads_per_numa ) { printf("please specify THREADS_PER_NUMA to a non-zero value!\n"); return -1; } else { threads_per_numa = atoi(env_threads_per_numa); } /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i ) { if (i == 0) { act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], 
(double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); } act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0*fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) ); /* allocate data */ act_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) ); delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers+2; ++i ) { act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float**) malloc( num_layers*sizeof(float*) ); fil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_master[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( 
C[i+1]*sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); /* allocating handles */ my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) ); my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) ); my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) ); /* setting up handles + scratch */ size_t max_bwd_scratch_size = 0, max_doutput_scratch_mark = 0; scratch_size = 0; /* setting up handles + scratch */ for ( i = 0; i < num_layers; ++i ) { /* MNIST Specific where everywhere we use relu act except the last layer */ if ( i < num_layers -1 ) { my_fuse = MY_ELTWISE_FUSE_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse, lr); my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? 
bk : C[i+1], nThreads, lr ); if (my_fc_bwd[i].scratch_size > 0 && my_fc_bwd[i].scratch_size > max_bwd_scratch_size) { max_bwd_scratch_size = my_fc_bwd[i].scratch_size; } if (my_fc_bwd[i].doutput_scratch_mark > 0 && my_fc_bwd[i].doutput_scratch_mark > max_doutput_scratch_mark) { max_doutput_scratch_mark = my_fc_bwd[i].doutput_scratch_mark; } /* let's allocate and bind scratch */ if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } } /* softmax+loss is treated as N+! layer */ my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads ); my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, loss_weight); my_vnni_reformat = setup_my_vnni_reformat(MB, C[num_layers], (MB % bn == 0) ? bn : MB, (C[num_layers] % bk == 0) ? 
bk : C[num_layers], nThreads, my_fuse); if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { scratch_size = alloc_size; } } scratch = libxsmm_aligned_scratch( scratch_size, 2097152 ); /* init data */ for ( i = 0 ; i < num_layers+2; ++i ) { init_acts(my_fc_fwd[i], act_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers+1; ++i ) { init_delacts(my_fc_bwd[i], delact_libxsmm[i], MB*C[i]); } for ( i = 0 ; i < num_layers; ++i ) { /*init_master_weights(my_opt[i], fil_master[i], C[i]*C[i+1] );*/ my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 ); libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] ); /*init_weights(my_fc_fwd[i], fil_libxsmm[i], C[i]*C[i+1]);*/ init_dweights(my_fc_bwd[i], delfil_libxsmm[i], C[i]*C[i+1]); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_bf16( delbias_libxsmm[i], C[i+1], 0, 0 ); } zero_buf_int32( label_libxsmm, MB ); /* Reading in the MNIST dataset */ int n_batches = NUM_TRAIN/MB, batch_id = 0; int n_epochs = iters, epoch_id = 0; libxsmm_bfloat16 *input_acts = (libxsmm_bfloat16*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152); /* Read in input data */ char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte"; char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte"; char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte"; char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte"; load_mnist(train_image_path, train_label_path, test_image_path, test_label_path); /* Format the input layer in NCNC blocked format */ int _i, _j; for (_i = 0; _i < n_batches*MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? 
(float) train_image[_i][_j] : (float)0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } printf("###########################################\n"); printf("# Training MNIST with %d training samples #\n", n_batches*MB); printf("###########################################\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j,epoch_id,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) { for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, &loss, 0, tid, scratch ); if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) { printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss); } my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, 0, tid, scratch ); for ( i = num_layers-1; i > 0; --i) { my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], (my_fc_bwd[i].fuse_relu_bwd > 0) ? 
relumask_libxsmm[i-1] : relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, fil_master[i] ); } my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, fil_master[0] ); } } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); } gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0); printf("GFLOP = %.5g\n", gflop/(double)((double)n_epochs *(double)n_batches)); printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches)))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total); #ifdef TEST_ACCURACY /* Test accuracy */ n_batches = NUM_TEST/MB; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float) test_image[_i][_j] : 0.0; int batchid = _i/MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? 
bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 ); } } n_batches = NUM_TEST/MB; unsigned int hits = 0; unsigned int samples = 0; #if defined(_OPENMP) # pragma omp parallel private(i,j,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (batch_id = 0; batch_id < n_batches; batch_id++) { for ( i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], test_label + batch_id * MB, &loss, 0, tid, scratch ); if (tid == 0) { for (_i = 0; _i < MB; _i++) { int label = *(test_label + batch_id * MB + _i); int max_id = 0; float max_val = 0.0; libxsmm_convert_bf16_f32( act_libxsmm[num_layers+1] + _i * 10, &max_val, 1 ); /* Find predicted label */ for (_j = 1; _j < 10; _j++) { libxsmm_bfloat16 val = *(act_libxsmm[num_layers+1] + _i * 10 + _j); float f32_val; libxsmm_convert_bf16_f32( &val, &f32_val, 1 ); if (f32_val > max_val) { max_id = _j; max_val = f32_val; } } /* Compare with true label */ if (max_id == label) { hits++; } samples++; } } #pragma omp barrier } } printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples); #endif /* deallocate data */ if ( scratch != NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i+1]); libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); 
libxsmm_free(fil_master[i]); } libxsmm_free(act_libxsmm[num_layers+1]); libxsmm_free(label_libxsmm); libxsmm_free(input_acts); free( my_opt ); free( my_fc_fwd ); free( my_fc_bwd ); free( act_libxsmm ); free( delact_libxsmm ); free( fil_master ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( C ); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
/*
 * Evangelos Georganas, Alexander Heinecke (Intel Corp.)
 ****************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
/* include c-based dnn library */
#include "../common/dnn_common.h"
#include "../common/mnist.h"

#define TEST_ACCURACY
#define OVERWRITE_DOUTPUT_BWDUPD
/* #define FUSE_WT_TRANS_SGD */
/* #define FUSE_ACT_TRANS_FWD */
/* #define FUSE_DACT_TRANS_BWD */
#define PRIVATE_WT_TRANS
#define PRIVATE_ACT_TRANS
#define PRIVATE_DACT_TRANS
#define FUSE_SGD_IN_BWD

/* Widen 16 packed bf16 values to fp32 by shifting them into the high half of each 32-bit lane. */
#define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16))
/* Round-convert 16 fp32 values to bf16 and store them packed. */
#define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)LIBXSMM_INTRINSICS_MM512_CVT_FP32_BF16((B)))

/* Threads per NUMA domain. NOTE(review): must be set to a positive value
 * before the NUMA-aware initializers below run, otherwise the divisions and
 * modulos on it are undefined -- confirm against the (unseen) caller. */
static int threads_per_numa = 0;

/* Zero a float buffer, then fill it with ones (initOne != 0), uniform randoms
 * in [0,1) (initPos != 0) or small values centered around zero (default). */
LIBXSMM_INLINE void my_init_buf(float *buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf(buf, size);
  for (i = 0; i < (int)size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64() / 10.0)));
  }
}

/* Same init policy as my_init_buf, but each value is truncated to bf16 by
 * keeping the upper 16 bits of the fp32 representation. */
LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16 *buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf_bf16(buf, size);
  for (i = 0; i < (int)size; ++i) {
    libxsmm_bfloat16_hp tmp;
    tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64() / 10.0)));
    buf[i] = tmp.i[1];
  }
}

/* First-touch aware bf16 initialization: logical thread ltid initializes only
 * the chunks owned by its NUMA node. ft_mode 0 assigns 4KiB chunks to nodes
 * block-cyclically; any other mode gives each node one contiguous block. */
LIBXSMM_INLINE void init_buf_bf16_numa_aware(int threads, int ltid, int ft_mode, libxsmm_bfloat16 *buf, size_t size, int initPos, int initOne)
{
  int chunksize, chunks;
  int my_numa_node = ltid / threads_per_numa;
  int n_numa_nodes = threads / threads_per_numa;
  int l = 0;
  if (ft_mode == 0) {
    /* Mode 0 : Block cyclic assignment to NUMA nodes */
    int bufsize = size * 2; /* size in bytes: bf16 is 2 bytes per element */
    chunksize = 4096;
    chunks = (bufsize + chunksize - 1) / chunksize;
    for (l = 0; l < chunks; l++) {
      int _chunksize = (l < chunks - 1) ? chunksize : bufsize - (chunks - 1) * chunksize;
      if (l % n_numa_nodes == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16 *)buf + l * (chunksize / 2), _chunksize / 2, 0, 0);
      }
    }
  } else {
    /* Mode 1: Block assignment to NUMA nodes */
    chunks = n_numa_nodes;
    chunksize = (size + chunks - 1) / chunks;
    for (l = 0; l < chunks; l++) {
      int _chunksize = (l < chunks - 1) ? chunksize : size - (chunks - 1) * chunksize;
      if (l == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16 *)buf + l * chunksize, _chunksize, 0, 0);
      }
    }
  }
}

/* Block-wise NUMA-aware buffer init (ft_mode 1).
 * BUG FIX: the original body referenced an undeclared `tid` (a leftover of a
 * stripped OpenMP parallel region) and did not compile. Serially iterate all
 * logical thread ids and let each NUMA-leader id initialize its node's share;
 * the resulting buffer contents are the same. */
void init_buffer_block_numa(libxsmm_bfloat16 *buf, size_t size)
{
  int nThreads = omp_get_max_threads();
  int tid;
  for (tid = 0; tid < nThreads; tid++) {
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 1, buf, size, 0, 0);
    }
  }
}

/* Block-cyclic NUMA-aware buffer init (ft_mode 0); same undeclared-`tid` fix
 * as init_buffer_block_numa above. */
void init_buffer_block_cyclic_numa(libxsmm_bfloat16 *buf, size_t size)
{
  int nThreads = omp_get_max_threads();
  int tid;
  for (tid = 0; tid < nThreads; tid++) {
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 0, buf, size, 0, 0);
    }
  }
}

#if 0
/* Repack a KCCK-blocked fp32 weight tensor into KCCK-VNNI layout (c2 pairs interleaved). */
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk)
{
  int k1, k2, c1, c2;
  int kBlocks = K / bk;
  int cBlocks = C / bc;
  LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk);
  LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc / 2, bk, 2);
  for (k1 = 0; k1 < kBlocks; k1++) {
    for (c1 = 0; c1 < cBlocks; c1++) {
      for (c2 = 0; c2 < bc; c2++) {
        for (k2 = 0; k2 < bk; k2++) {
          LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2 / 2, k2, c2 % 2, cBlocks, bc / 2, bk, 2) =
            LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
        }
      }
    }
  }
}
#endif

/* Bitmask of elementwise operations fused into a GEMM. */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* Which pass(es) to execute; MY_PASS_BWD combines BWD_D and BWD_W. */
typedef enum my_pass {
  MY_PASS_FWD = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD = 6
} my_pass;

/* SGD optimizer configuration (struct continues on the following lines). */
typedef struct my_opt_config {
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
/* ---- remainder of my_opt_config (struct opened above) ---- */
  libxsmm_blasint opt_2d_blocking; /* nonzero: use a 2D (col x row) team decomposition */
  libxsmm_blasint opt_col_teams;
  libxsmm_blasint opt_row_teams;
  float lr;                        /* SGD learning rate */
  size_t scratch_size;
  libxsmm_barrier *barrier;
} my_opt_config;

/* Softmax forward configuration. */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;  /* minibatch size */
  libxsmm_blasint C;  /* number of features/classes */
  libxsmm_blasint bn; /* N blocking */
  libxsmm_blasint bc; /* C blocking */
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier *barrier;
} my_smax_fwd_config;

/* Softmax + loss backward configuration. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight; /* scaling applied to the loss gradient */
  libxsmm_barrier *barrier;
  my_eltwise_fuse fuse_type;
} my_smax_bwd_config;

/* Normal->VNNI reformat (optionally fused with ReLU backward) configuration. */
typedef struct my_vnni_reformat_config {
  libxsmm_blasint C;
  libxsmm_blasint N;
  libxsmm_blasint bc;
  libxsmm_blasint bn;
  libxsmm_blasint threads;
  libxsmm_barrier *barrier;
  my_eltwise_fuse fuse_type;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel;
  libxsmm_meltwfunction_unary fused_relu_kernel;
} my_vnni_reformat_config;

/* Fully-connected layer forward configuration: blocking, parallelization
 * strategy and the JITed TPP kernels used by the forward pass. */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;  /* minibatch */
  libxsmm_blasint C;  /* input features */
  libxsmm_blasint K;  /* output features */
  libxsmm_blasint bn; /* N blocking */
  libxsmm_blasint bc; /* C blocking */
  libxsmm_blasint bk; /* K blocking */
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;          /* blocking factor over the reduction (C) dimension */
  libxsmm_blasint fwd_2d_blocking; /* nonzero: 2D team decomposition */
  libxsmm_blasint fwd_col_teams;
  libxsmm_blasint fwd_row_teams;
  libxsmm_blasint fwd_M_hyperpartitions;
  libxsmm_blasint fwd_N_hyperpartitions;
  size_t scratch_size;
  libxsmm_barrier *barrier;
  libxsmm_bsmmfunction fwd_config_kernel; /* AMX tile-config kernel */
  libxsmm_bsmmfunction tilerelease_kernel;
  /* BRGEMM flavors: plain accumulate, zero-beta, bf16-out, and fused bias/activation variants */
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8;
  /* elementwise TPPs: converts, activations, zeroing, bias broadcast */
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary
fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_zero_kernel;
  libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel;
  libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel;
} my_fc_fwd_config;

/* Fully-connected layer backward (d-input + d-weight) configuration. */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf; /* reduction blocking for the BWD-D pass (over K) */
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint bwd_M_hyperpartitions;
  libxsmm_blasint bwd_N_hyperpartitions;
  libxsmm_blasint upd_bf; /* reduction blocking for the BWD-W pass (over N) */
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint upd_M_hyperpartitions;
  libxsmm_blasint upd_N_hyperpartitions;
  libxsmm_blasint ifm_subtasks; /* intra-block subtasking along C */
  libxsmm_blasint ofm_subtasks; /* intra-block subtasking along K */
  libxsmm_blasint fuse_relu_bwd;
  /* offsets into the shared scratch for the private-transpose regions */
  size_t bwd_private_tr_wt_scratch_mark;
  size_t upd_private_tr_act_scratch_mark;
  size_t upd_private_tr_dact_scratch_mark;
  size_t scratch_size;
  size_t doutput_scratch_mark;
  libxsmm_barrier *barrier;
  libxsmm_bsmmfunction bwd_config_kernel;
  libxsmm_bsmmfunction upd_config_kernel;
  libxsmm_bsmmfunction tilerelease_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_bwd5;
  libxsmm_meltwfunction_unary bwd_fused_relu_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd2;
  libxsmm_bmmfunction_reducebatch_strd gemm_upd3;
  libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary upd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary bwd_relu_kernel;
  libxsmm_meltwfunction_unary bwd_zero_kernel;
  libxsmm_meltwfunction_unary upd_zero_kernel;
  libxsmm_meltwfunction_unary delbias_reduce_kernel;
  libxsmm_meltwfunction_unary
vnni_to_vnniT_kernel; libxsmm_meltwfunction_unary norm_to_normT_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel; libxsmm_meltwfunction_unary norm_to_vnni_kernel_wt; float lr; } my_fc_bwd_config; my_vnni_reformat_config setup_my_vnni_reformat(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_vnni_reformat_config res; libxsmm_blasint ld = bc; res.N = N; res.C = C; res.bn = bn; res.bc = bc; res.threads = threads; res.fuse_type = fuse_type; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); res.fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ld, &ld, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if (res.fused_relu_kernel == NULL) { fprintf(stderr, "JIT for TPP fused_relu_kernel failed. Bailing...!\n"); exit(-1); } return res; } my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_fwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero = bk * bn; libxsmm_blasint ld_upconvert = K; float alpha = 1.0 f; float beta = 1.0 f; float zerobeta = 0.0 f; libxsmm_meltw_flags fusion_flags; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); libxsmm_blasint unroll_hint; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 1; if (threads == 16) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 8; } else if (threads == 14) { res.fwd_bf 
= 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 2; res.fwd_row_teams = 7; } else if (threads == 56) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_col_teams = 1; res.fwd_row_teams = 14; res.fwd_M_hyperpartitions = 1; res.fwd_N_hyperpartitions = 4; } else { res.fwd_bf = 1; res.fwd_2d_blocking = 0; res.fwd_col_teams = 1; res.fwd_row_teams = 1; } #if 0 res.fwd_bf = atoi(getenv("FWD_BF")); res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING")); res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS")); res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); unroll_hint = (res.C / res.bc) / res.fwd_bf; res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if (res.fwd_config_kernel == NULL) { fprintf(stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n"); exit(-1); } res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if (res.gemm_fwd == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n"); exit(-1); } res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_fwd2 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. 
Bailing...!\n"); exit(-1); } res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_fwd3 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C; res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if (res.gemm_fwd4 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C; res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if (res.gemm_fwd5 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C; res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if (res.gemm_fwd6 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. 
Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C; res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if (res.gemm_fwd7 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n"); exit(-1); } fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C; res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0); if (res.gemm_fwd8 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if (res.fwd_cvtfp32bf16_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU); if (res.fwd_cvtfp32bf16_relu_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. 
Bailing...!\n"); exit(-1); } res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID); if (res.fwd_sigmoid_cvtfp32bf16_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if (res.tilerelease_kernel == NULL) { fprintf(stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn * bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if (res.fwd_zero_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if (res.fwd_colbcast_bf16fp32_copy_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n"); exit(-1); } res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if (res.fwd_copy_bf16fp32_kernel == NULL) { fprintf(stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K)); return res; } my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type, float lr) { my_fc_bwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero_bwd = bc * bn; libxsmm_blasint ld_zero_upd = bk; libxsmm_blasint delbias_K = K; libxsmm_blasint delbias_N = N; float alpha = 1.0 f; float beta = 1.0 f; float zerobeta = 0.0 f; libxsmm_blasint updM; libxsmm_blasint updN; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); libxsmm_blasint unroll_hint; size_t size_bwd_scratch; size_t size_upd_scratch; libxsmm_blasint bbk; libxsmm_blasint bbc; libxsmm_blasint ldaT = bc; libxsmm_blasint ldb_orig = bc; libxsmm_meltw_flags fusion_flags_bwd; libxsmm_meltw_operation bwd_fused_op; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; res.fuse_relu_bwd = 0; res.lr = lr; /* setup parallelization strategy */ res.bwd_M_hyperpartitions = 1; res.upd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 1; res.upd_N_hyperpartitions = 1; if (threads == 16) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 8; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 8; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 14) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 7; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 7; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 56) { res.bwd_bf = 
1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 1; res.bwd_row_teams = 14; res.bwd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 4; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 1; res.upd_row_teams = 14; res.upd_M_hyperpartitions = 1; res.upd_N_hyperpartitions = 4; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else { res.bwd_bf = 1; res.bwd_2d_blocking = 0; res.bwd_col_teams = 1; res.bwd_row_teams = 1; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_col_teams = 1; res.upd_row_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } bbk = (res.upd_2d_blocking == 1) ? bk : bk / res.ofm_subtasks; bbc = (res.upd_2d_blocking == 1) ? bc : bc / res.ifm_subtasks; #if 0 res.bwd_bf = atoi(getenv("BWD_BF")); res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING")); res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS")); res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS")); res.upd_bf = atoi(getenv("UPD_BF")); res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING")); res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS")); res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS")); res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS")); res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS")); #endif if (res.bwd_2d_blocking != 1) { printf("Requested private wt transposes, but for the current # of threads the bwd decomposition is not 2D. Will perform upfront/shared wt transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private act transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared act transposes...\n"); } if (res.upd_2d_blocking != 1) { printf("Requested private dact transposes, but for the current # of threads the upd decomposition is not 2D. 
Will perform upfront/shared dact transposes...\n"); } /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ /* BWD GEMM */ l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); unroll_hint = (res.K / res.bk) / res.bwd_bf; res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL); if (res.gemm_bwd == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n"); exit(-1); } res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_bwd2 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n"); exit(-1); } res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_bwd3 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n"); exit(-1); } res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL); if (res.bwd_config_kernel == NULL) { fprintf(stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... 
*/ res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if (res.bwd_cvtfp32bf16_kernel == NULL) { fprintf(stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if (res.bwd_relu_kernel == NULL) { fprintf(stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n"); exit(-1); } res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn * bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if (res.bwd_zero_kernel == NULL) { fprintf(stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernel */ res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT); if (res.vnni_to_vnniT_kernel == NULL) { fprintf(stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n"); exit(-1); } bwd_fused_op = LIBXSMM_MELTW_OPERATION_COLBIAS_ACT; fusion_flags_bwd = LIBXSMM_MELTW_FLAG_ACT_RELU_BWD_OVERWRITE_C; res.gemm_bwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL, bwd_fused_op, LIBXSMM_DATATYPE_BF16, fusion_flags_bwd, 0, 0, 0, 0); if (res.gemm_bwd5 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd5 failed. 
Bailing...!\n"); exit(-1); } if (((fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) && (res.upd_2d_blocking == 1)) { res.fuse_relu_bwd = 1; } res.bwd_fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if (res.bwd_fused_relu_kernel == NULL) { fprintf(stderr, "JIT for TPP bwd_fused_relu_kernel failed. Bailing...!\n"); exit(-1); } /* UPD GEMM */ lda = res.bk; ldb = res.bn; ldc = res.bk; updM = res.bk / res.ofm_subtasks; updN = res.bc / res.ifm_subtasks; l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG; l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); unroll_hint = (res.N / res.bn) / res.upd_bf; res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL); if (res.gemm_upd == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n"); exit(-1); } res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_upd2 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n"); exit(-1); } l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C; res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL); if (res.gemm_upd3 == NULL) { fprintf(stderr, "JIT for BRGEMM TPP gemm_upd3 failed. 
Bailing...!\n"); exit(-1); } res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL); if (res.upd_config_kernel == NULL) { fprintf(stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n"); exit(-1); } res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL); if (res.tilerelease_kernel == NULL) { fprintf(stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n"); exit(-1); } /* Also JIT eltwise TPPs... */ res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if (res.upd_cvtfp32bf16_kernel == NULL) { fprintf(stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n"); exit(-1); } res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if (res.upd_zero_kernel == NULL) { fprintf(stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n"); exit(-1); } res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if (res.delbias_reduce_kernel == NULL) { fprintf(stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n"); exit(-1); } /* JITing the tranpose kernels */ res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI); if (res.norm_to_vnni_kernel == NULL) { fprintf(stderr, "JIT for TPP norm_to_vnni_kernel failed. 
Bailing...!\n"); exit(-1); }
/* JIT the norm->VNNI reformat kernel applied to (bbk x bbc) weight-gradient tiles. */
res.norm_to_vnni_kernel_wt = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
/* NOTE(review): this message says "norm_to_vnni_kernel" although the check is for
 * norm_to_vnni_kernel_wt — looks like a copy/paste slip in the string literal. */
if (res.norm_to_vnni_kernel_wt == NULL) { fprintf(stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n"); exit(-1); }
/* JIT the norm->normT transpose kernel for (bc x bn) activation tiles; the output
 * uses a different leading dimension (ldb_orig) than the input (ldb). */
res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
if (res.norm_to_normT_kernel == NULL) { fprintf(stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n"); exit(-1); }
/* init scratch: size the shared scratch as the max of the BWD-data and UPD needs and
 * record byte offsets ("marks") at which private sub-buffers begin inside it. */
size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
/* offset of the per-thread transposed-weight area in the bwd scratch */
res.bwd_private_tr_wt_scratch_mark = size_bwd_scratch;
size_bwd_scratch += res.threads * res.bc * res.K * sizeof(libxsmm_bfloat16);
size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
/* a bf16 copy of doutput lives past this offset (see my_fc_bwd_exec) */
res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch);
res.upd_private_tr_dact_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bk * res.N * sizeof(libxsmm_bfloat16);
res.upd_private_tr_act_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bc * res.N * (((res.C / res.bc) + res.upd_col_teams - 1) / res.upd_col_teams) * sizeof(libxsmm_bfloat16);
return res;
}

/*
 * Build the optimizer (SGD) configuration: records the weight dimensions and
 * blocking, picks a 2D thread-team decomposition for the hand-tuned thread
 * counts (16 and 14), stores the learning rate and creates the barrier.
 * No scratch is needed (scratch_size = 0).
 */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) {
  my_opt_config res;
  /* setting up some handle values */
  res.C = C;
  res.K = K;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  /* 2D blocking is enabled only for the two tuned thread counts */
  if (threads == 16) {
    res.opt_2d_blocking = 1;
    res.opt_col_teams = 2;
    res.opt_row_teams = 8;
  } else if (threads == 14) {
    res.opt_2d_blocking = 1;
    res.opt_col_teams = 2;
    res.opt_row_teams = 7;
  } else {
    res.opt_2d_blocking = 0;
    res.opt_col_teams = 1;
    res.opt_row_teams = 1;
  }
  res.lr = lr;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = 0;
  return res;
}

/*
 * Build the softmax forward configuration: plain copies of the dimensions and
 * blocking plus a barrier; scratch holds two f32 copies of the (C x N) activations.
 */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) {
  my_smax_fwd_config res;
  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = (sizeof(float) * res.C * res.N * 2);; /* NOTE(review): stray second ';' (empty statement), harmless */
  return res;
}

/*
 * Build the softmax backward configuration: same as the forward setup plus the
 * loss weight used to scale the gradient.
 */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config res;
  /* setting up some handle values */
  res.C = C;
  res.N = N;
  res.bc = bc;
  res.bn = bn;
  res.threads = threads;
  res.loss_weight = loss_weight;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* init scratch */
  res.scratch_size = (sizeof(float) * res.C * res.N * 2);
  return res;
}

/*
 * Forward pass of the bf16 fully-connected layer for one thread of a parallel
 * region: out = W * in (+ bias, + ReLU depending on cfg.fuse_type). Weights are
 * expected in VNNI layout (pairs of bf16 along the input-feature dimension,
 * hence lpb = 2 and bc_lp = bc / 2). The caller provides a shared scratch and
 * the thread identifiers; synchronization happens on cfg.barrier.
 */
void my_fc_fwd_exec(my_fc_fwd_config cfg, const libxsmm_bfloat16 * wt_ptr, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * out_act_ptr, const libxsmm_bfloat16 * bias_ptr, unsigned char *relu_ptr, int start_tid, int my_tid, void *scratch) {
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint lpb = 2; /* bf16 elements packed per VNNI group */
  const libxsmm_blasint bc_lp = cfg.bc / lpb;
  /* const libxsmm_blasint bc = cfg.bc; */
libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
/* f32 staging tensor in scratch, used to accumulate over BF reduction chunks */
LIBXSMM_VLA_DECL(4, float, output_f32, (float *)scratch, nBlocksOFm, bn, bk);
libxsmm_meltw_gemm_param gemm_eltwise_params;
/* per-thread f32 copy of the bias (only when bias fusion is requested) */
float *fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float *)scratch + ltid * cfg.K : NULL;
LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16 *) bias_ptr : NULL, cfg.bk);
/* ReLU mask: one bit per output element, stored as __mmask32 words (bk / 32 per row) */
LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32 *) relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk / 32);
libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltw_unary_param eltwise_params_act;
libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
libxsmm_meltw_unary_param eltwise_params;
libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
libxsmm_meltw_unary_param copy_params;
unsigned long long blocks = nBlocksIFm;
libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
/* select the fused BRGEMM variant matching the requested bias/ReLU fusion */
if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
} else {
  bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL;
}
/* split the reduction (input-feature) dimension into BF chunks of CB_BLOCKS blocks */
BF = cfg.fwd_bf;
CB_BLOCKS = nBlocksIFm / BF;
blocks = CB_BLOCKS;
if (use_2d_blocking == 1) {
  /* carve the (MB x OFm) task grid into hyperpartitions and 2D thread teams,
   * yielding this thread's [my_M_start,my_M_end) x [my_N_start,my_N_end) tile range */
  int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksMB, hyperteam_id;
  col_teams = cfg.fwd_col_teams;
  row_teams = cfg.fwd_row_teams;
  hyperteam_id = ltid / (col_teams * row_teams);
  _nBlocksOFm = nBlocksOFm / cfg.fwd_M_hyperpartitions;
  _nBlocksMB = nBlocksMB / cfg.fwd_N_hyperpartitions;
  _ltid = ltid % (col_teams * row_teams);
  M_hyperpartition_id = hyperteam_id % cfg.fwd_M_hyperpartitions;
  N_hyperpartition_id = hyperteam_id / cfg.fwd_M_hyperpartitions;
  my_row_id = _ltid % row_teams;
  my_col_id = _ltid / row_teams;
  N_tasks_per_thread = (_nBlocksMB + col_teams - 1) / col_teams;
  M_tasks_per_thread = (_nBlocksOFm + row_teams - 1) / row_teams;
  my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksMB);
  my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksMB);
  my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksOFm);
  my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksOFm);
}
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
/* configure AMX tiles for the fwd GEMM shapes */
cfg.fwd_config_kernel(NULL, NULL, NULL);
if (use_2d_blocking == 1) {
  if (BF > 1) {
    /* reduction is chunked: accumulate in the f32 staging tensor, seed it with the
     * bias (or zeros) at the first chunk, downconvert to bf16 at the last chunk */
    for (ifm1 = 0; ifm1 < BF; ++ifm1) {
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          if (ifm1 == 0) {
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
              copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0, cfg.bk);
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
            } else {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_zero_kernel(&copy_params);
            }
          }
          cfg.gemm_fwd(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1 * CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1 * CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          if (ifm1 == BF - 1) {
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
              /* fused f32->bf16 convert + ReLU, emitting the ReLU bitmask */
              eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
              eltwise_kernel_act(&eltwise_params_act);
            } else {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      }
    }
  } else {
    /* single reduction chunk: GEMM writes bf16 directly, bias/ReLU fused into BRGEMM */
    if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
      /* upconvert the whole bias once into this thread's f32 scratch */
      copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, 0, 0, cfg.bk);
      copy_params.out.primary = fp32_bias_scratch;
      cfg.fwd_copy_bf16fp32_kernel(&copy_params);
    }
    for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
      for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
        if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            gemm_eltwise_params.bias_ptr = (float *)fp32_bias_scratch + ofm1 * cfg.bk;
          }
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
          }
          bf16_batchreduce_kernel_zerobeta_fused_eltwise(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
        } else {
          cfg.gemm_fwd3(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
        }
      }
    }
  }
} else {
  /* 1D task decomposition: flat [thr_begin, thr_end) range over (MB x OFm) blocks */
  if (BF > 1) {
    for (ifm1 = 0; ifm1 < BF; ++ifm1) {
      for (mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1) {
        mb1 = mb1ofm1 % nBlocksMB;
        ofm1 = mb1ofm1 / nBlocksMB;
        /* Initialize intermediate f32 tensor */
        if (ifm1 == 0) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0, cfg.bk);
            copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
          } else {
            copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            cfg.fwd_zero_kernel(&copy_params);
          }
        }
        cfg.gemm_fwd(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1 * CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1 * CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
        if (ifm1 == BF - 1) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
            eltwise_kernel_act(&eltwise_params_act);
          } else {
            eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
            eltwise_kernel(&eltwise_params);
          }
        }
      }
    }
  } else {
    if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
      copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, 0, 0, cfg.bk);
      copy_params.out.primary = fp32_bias_scratch;
      cfg.fwd_copy_bf16fp32_kernel(&copy_params);
    }
    for (mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1) {
      mb1 = mb1ofm1 % nBlocksMB;
      ofm1 = mb1ofm1 / nBlocksMB;
      if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
          gemm_eltwise_params.bias_ptr = (float *)fp32_bias_scratch + ofm1 * cfg.bk;
        }
        if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
          gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
        }
        bf16_batchreduce_kernel_zerobeta_fused_eltwise(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
      } else {
        cfg.gemm_fwd3(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
      }
    }
  }
}
/* release AMX tile config and synchronize all threads of this layer */
cfg.tilerelease_kernel(NULL, NULL, NULL);
libxsmm_barrier_wait(cfg.barrier, ltid);
}

/*
 * Backward pass of the bf16 fully-connected layer for one thread: computes the
 * input gradient (MY_PASS_BWD_D) and/or the weight gradient (MY_PASS_BWD_W)
 * according to 'pass', with optional fused ReLU-backward and bias-gradient
 * reduction. The function continues beyond this excerpt.
 */
void my_fc_bwd_exec(my_fc_bwd_config cfg, libxsmm_bfloat16 * wt_ptr, libxsmm_bfloat16 * din_act_ptr, const libxsmm_bfloat16 * dout_act_ptr, libxsmm_bfloat16 * dwt_ptr, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * dbias_ptr, const unsigned char *relu_ptr, my_pass pass, int start_tid, int my_tid, void *scratch, float *fil_master) {
/* size variables, all const */
/* here we assume that input and output blocking is similar */
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint bc = cfg.bc;
libxsmm_blasint lpb = 2;
const libxsmm_blasint bc_lp = bc / lpb;
const libxsmm_blasint bk_lp = bk / lpb;
const libxsmm_blasint bn_lp = bn / lpb;
const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
libxsmm_blasint performed_doutput_transpose = 0;
libxsmm_meltw_unary_param trans_param;
unsigned int i; /* NOTE(review): appears unused in the visible part of this function — confirm before removing */
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const
libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work; const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint dbias_work = nBlocksOFm; /* compute chunk size */ const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work; const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work; LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16 *) dbias_ptr : NULL, cfg.bk); libxsmm_blasint ext_blocks = (cfg.fuse_relu_bwd == 1) ? nBlocksIFm : nBlocksOFm; libxsmm_blasint int_blocks = (cfg.fuse_relu_bwd == 1) ? cfg.bc : cfg.bk; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32 *) relu_ptr : NULL, ext_blocks, cfg.bn, int_blocks / 32); libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16 *) dout_act_ptr; libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? 
(libxsmm_bfloat16 *) ((char *)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16 *) scratch; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16 *) dout_act_ptr, nBlocksOFm, bn, bk); libxsmm_meltw_unary_param relu_params; libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel; LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb); libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel; libxsmm_meltwfunction_unary eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel; libxsmm_meltw_unary_param eltwise_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; libxsmm_meltw_gemm_param eltwise_params_bwd; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); cfg.bwd_config_kernel(NULL, NULL, NULL); if (cfg.upd_2d_blocking == 0) { /* Apply to doutput potential fusions */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { for (mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1) { mb1 = mb1ofm1 / nBlocksOFm; ofm1 = mb1ofm1 % nBlocksOFm; relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32); relu_kernel(&relu_params); /* If in UPD pass, also perform transpose of doutput */ if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) { performed_doutput_transpose = 1; } 
libxsmm_barrier_wait(cfg.barrier, ltid); } /* Accumulation of bias happens in f32 */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for (ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } /* wait for eltwise to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } } if ((pass & MY_PASS_BWD_D) == MY_PASS_BWD_D) { libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking; /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* number of tasks for transpose that could be run in parallel */ const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm; /* compute chunk size */ const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work; const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? 
((ltid + 1) * transpose_chunksize) : transpose_work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16 *) din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16 *) scratch, nBlocksOFm, bk_lp, bc, lpb); float *temp_output = (float *)scratch + (cfg.C * cfg.K) / 2; LIBXSMM_VLA_DECL(4, float, dinput_f32, (float *)temp_output, nBlocksIFm, bn, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm / BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksIFm, _nBlocksMB, hyperteam_id; col_teams = cfg.bwd_col_teams; row_teams = cfg.bwd_row_teams; hyperteam_id = ltid / (col_teams * row_teams); _nBlocksIFm = nBlocksIFm / cfg.bwd_M_hyperpartitions; _nBlocksMB = nBlocksMB / cfg.bwd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.bwd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.bwd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksMB + col_teams - 1) / col_teams; M_tasks_per_thread = (_nBlocksIFm + row_teams - 1) / row_teams; my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksMB); my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksMB); my_M_start = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksIFm); my_M_end = M_hyperpartition_id * _nBlocksIFm + 
LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksIFm); } /* transpose weight */ if (cfg.bwd_2d_blocking == 0) { for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / nBlocksIFm; ifm1 = ifm1ofm1 % nBlocksIFm; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } /* wait for transpose to finish */ libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_filter_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.bwd_private_tr_wt_scratch_mark)) + ltid * bc * cfg.K, bk_lp, bc, lpb); if (BF > 1) { for (ofm1 = 0; ofm1 < BF; ++ofm1) { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = ofm1 * KB_BLOCKS; ofm2 < (ofm1 + 1) * KB_BLOCKS; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { /* Initialize libxsmm_blasintermediate f32 tensor */ if (ofm1 == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm1 * KB_BLOCKS, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1 * KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* * downconvert libxsmm_blasintermediate f32 * tensor to bf 16 and store to final C */ if (ofm1 == BF - 1) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, 
mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); if (cfg.fuse_relu_bwd > 0) { relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32); cfg.bwd_fused_relu_kernel(&relu_params); } } } } } } else { for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) { for (ofm2 = 0; ofm2 < nBlocksOFm; ofm2++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb); cfg.vnni_to_vnniT_kernel(&trans_param); } for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) { if (cfg.fuse_relu_bwd > 0) { eltwise_params_bwd.relu_bitmask_bwd = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32); cfg.gemm_bwd5(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks, &eltwise_params_bwd); } else { cfg.gemm_bwd3(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } } else { if (BF > 1) { for (ofm1 = 0; ofm1 < BF; ++ofm1) { for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; /* Initialize libxsmm_blasintermediate f32 tensor */ if (ofm1 == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); cfg.bwd_zero_kernel(&copy_params); } cfg.gemm_bwd(&LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1 * KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), 
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1 * KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); /* * downconvert libxsmm_blasintermediate f32 tensor to * bf 16 and store to final C */ if (ofm1 == BF - 1) { eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); eltwise_kernel(&eltwise_params); } } } } else { for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; cfg.gemm_bwd3(&LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (cfg.upd_2d_blocking == 1) { /* Accumulation of bias happens in f32 */ if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) { for (ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1) { delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk); delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk); cfg.delbias_reduce_kernel(&delbias_params); } } } if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk / ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? 
bc : bc / ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, mb3 = 0, bfn = 0, mb1ifm1 = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB / BF; LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16 *) in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16 *) dwt_ptr, nBlocksIFm, bc_lp, bk, lpb); /* * Set up tensors for transposing/scratch before vnni reformatting * dfilter */ libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16 *) ((libxsmm_bfloat16 *) scratch + cfg.N * cfg.K); float *dfilter_f32_ptr = (float *)((libxsmm_bfloat16 *) tr_inp_ptr + cfg.N * cfg.C); #ifndef BYPASS_SGD libxsmm_bfloat16 *dfilter_scratch = (libxsmm_bfloat16 *) ((float *)dfilter_f32_ptr + cfg.C * cfg.K) + ltid * bc * bk; #endif LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16 *) tr_inp_ptr, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float *)dfilter_f32_ptr, nBlocksIFm, bc, bk); #ifndef BYPASS_SGD LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dfilter_block, (libxsmm_bfloat16 *) 
dfilter_scratch, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float *)fil_master, nBlocksIFm, bc, bk); #endif const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm; const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1); const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm; const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1); const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work; const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? 
((ltid + 1) * tr_inp_chunksize) : tr_inp_work; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksIFm, hyperteam_id; col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; hyperteam_id = ltid / (col_teams * row_teams); _nBlocksOFm = nBlocksOFm / cfg.upd_M_hyperpartitions; _nBlocksIFm = nBlocksIFm / cfg.upd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.upd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.upd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksIFm + col_teams - 1) / col_teams; M_tasks_per_thread = (_nBlocksOFm + row_teams - 1) / row_teams; my_N_start = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksIFm); my_N_end = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksIFm); my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksOFm); my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksOFm); } if (cfg.upd_2d_blocking == 0) { /* Required upfront tranposes */ for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; trans_param.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if (cfg.upd_2d_blocking == 0) { if (performed_doutput_transpose == 0) { for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) { mb1 = mb1ofm1 % nBlocksMB; ofm1 = mb1ofm1 / nBlocksMB; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, 
bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_input_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.upd_private_tr_act_scratch_mark)) + ltid * bc * cfg.N * N_tasks_per_thread, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_doutput_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.upd_private_tr_dact_scratch_mark)) + ltid * bk * cfg.N, bn_lp, bk, lpb); ifm2 = 0; ofm2 = 0; if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if ((bc % 16 == 0) && (bk % 16 == 0)) { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 
0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } } else { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk), &blocks); trans_param.in.primary = &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); cfg.norm_to_vnni_kernel_wt(&trans_param); { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } } } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = bfn * blocks; mb3 < (bfn + 1) * blocks; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = bfn * blocks; mb3 < (bfn + 1) * blocks; mb3++) { trans_param.in.primary = (void 
*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, bfn * blocks, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, bfn * blocks, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF - 1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); cfg.norm_to_vnni_kernel_wt(&trans_param); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } #endif } } } } } } else { if (BF == 1) { for (ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; 
++ifm1ofm1) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2 * bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2 * bbc) / lpb, ofm2 * bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } #endif } } else { for (bfn = 0; bfn < BF; bfn++) { for (ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn * blocks, 0, ofm2 * bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn * blocks, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk), &blocks); /* 
Downconvert result to BF16 and vnni format */
if (bfn == BF - 1) {
  LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64);
  /* f32 accumulator tile -> bf16 scratch tile, then reformat into the VNNI-layout dfilter */
  eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk);
  eltwise_params.out.primary = tmp_buf;
  trans_param.in.primary = tmp_buf;
  trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2 * bbc) / lpb, ofm2 * bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
  eltwise_kernel2(&eltwise_params);
  cfg.norm_to_vnni_kernel_wt(&trans_param);
#ifndef BYPASS_SGD
  /* fused SGD step: new_w = master_w - lr * dW, written to both the bf16
   * working copy and the f32 master copy of the weights */
  {
    __m512 vlr = _mm512_set1_ps(cfg.lr);
    libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
    libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
    float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
    for (i = 0; i < bc * bk; i += 16) {
      __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i)));
      _mm512_store_fil(wt_bf16 + i, newfilter);
      _mm512_storeu_ps(wt_fp32 + i, newfilter);
    }
  }
#endif
}
/* closes: ifm1ofm1 task loop, bfn accumulation loop, flat-blocking else, BWD_W branch nesting */
}
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
}

/* Optimizer step (plain SGD): weights -= lr * gradient. Maintains both an f32
 * master copy and a bf16 working copy; work is partitioned over cfg.threads. */
void my_opt_exec(my_opt_config cfg, libxsmm_bfloat16 * wt_ptr, float *master_wt_ptr, const libxsmm_bfloat16 * delwt_ptr, int start_tid, int my_tid, void *scratch)
{
  /* loop counters */
  libxsmm_blasint i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2;  /* VNNI low-precision blocking factor for bf16 */
  const libxsmm_blasint bc_lp = bc / lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  /* number of tasks that could run in parallel for the filters */
  const libxsmm_blasint work = cfg.C * cfg.K;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* lazy barrier init; NOTE(review): 'scratch' is not referenced in this function */
  libxsmm_barrier_init(cfg.barrier, ltid);
#if defined(__AVX512BW__)
  __m512 vlr = _mm512_set1_ps(cfg.lr);
  if (cfg.opt_2d_blocking == 1) {
    /* 2D decomposition of the K x C filter blocks over row_teams x col_teams */
    libxsmm_blasint ofm1, ifm1;
    libxsmm_blasint col_teams = cfg.opt_col_teams;
    libxsmm_blasint row_teams = cfg.opt_row_teams;
    libxsmm_blasint my_row_id = ltid % row_teams;
    libxsmm_blasint my_col_id = ltid / row_teams;
    libxsmm_blasint N_tasks_per_thread = (nBlocksIFm + col_teams - 1) / col_teams;
    libxsmm_blasint M_tasks_per_thread = (nBlocksOFm + row_teams - 1) / row_teams;
    libxsmm_blasint my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksIFm);
    libxsmm_blasint my_N_end = LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, nBlocksIFm);
    libxsmm_blasint my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
    libxsmm_blasint my_M_end = LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, nBlocksOFm);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16 *) delwt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(4, float, master_filter, (float *)master_wt_ptr, nBlocksIFm, bc, bk);
    libxsmm_bfloat16 *wt_bf16, *dwt_bf16;
    float *wt_fp32;
    for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
      for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
        dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
        wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
        wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
        for (i = 0; i < bc * bk; i += 16) {
          __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i)));
          _mm512_store_fil(wt_bf16 + i, newfilter);
          _mm512_storeu_ps(wt_fp32 + i, newfilter);
        }
      }
    }
  } else {
    libxsmm_blasint iv = ((thr_end - thr_begin) / 16) * 16;  /* compute iterations which are vectorizable */
    for (i = thr_begin; i < thr_begin + iv; i += 16) {
      __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(master_wt_ptr + i), _mm512_mul_ps(vlr, _mm512_load_fil(delwt_ptr + i)));
      _mm512_store_fil(wt_ptr + i, newfilter);
      _mm512_storeu_ps(master_wt_ptr + i, newfilter);
    }
    /* scalar remainder: widen bf16 gradient to f32 by placing it in the high 16 bits */
    for (i = thr_begin + iv; i < thr_end; ++i) {
      libxsmm_bfloat16_hp t1, t2;
      t1.i[0] = 0;
      t1.i[1] = delwt_ptr[i];
      master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr * t1.f);
      t2.f = master_wt_ptr[i];
      wt_ptr[i] = t2.i[1];
    }
  }
#else
  /* non-AVX512BW fallback: scalar SGD over this thread's chunk */
  for (i = thr_begin; i < thr_end; ++i) {
    libxsmm_bfloat16_hp t1, t2;
    t1.i[0] = 0;
    t1.i[1] = delwt_ptr[i];
    master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr * t1.f);
    t2.f = master_wt_ptr[i];
    wt_ptr[i] = t2.i[1];
  }
#endif
  libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* Softmax forward on bf16 activations (f32 scratch), plus single-threaded NLL loss. */
void my_smax_fwd_exec(my_smax_fwd_config cfg, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * out_act_ptr, const int *label_ptr, float *loss, int start_tid, int my_tid, void *scratch)
{
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N / cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C / cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ?
(ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16 *poutput_bf16 = out_act_ptr; const libxsmm_bfloat16 *pinput_bf16 = in_act_ptr; float *poutput_fp32 = (float *)scratch; float *pinput_fp32 = ((float *)scratch) + (cfg.N * cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = n_thr_begin; i < n_thr_end; ++i) { float max = FLT_MIN; float sum_of_exp = 0.0 f; img1 = i / bn; img2 = i % bn; /* set output to input and set compute max per image */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc); if (LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc) > max) { max = LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc); } } } /* sum exp over outputs */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = 
(float)exp((double)(LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) - max)); sum_of_exp += LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc); } } /* scale output */ sum_of_exp = 1.0 f / sum_of_exp; for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) * sum_of_exp; } } } libxsmm_barrier_wait(cfg.barrier, ltid); /* calculate loss single threaded */ if (ltid == 0) { (*loss) = 0.0 f; for (img1 = 0; img1 < Bn; ++img1) { for (img2 = 0; img2 < bn; ++img2) { libxsmm_blasint ifm = (libxsmm_blasint) LIBXSMM_VLA_ACCESS(2, label, img1, img2, bn); libxsmm_blasint ifm1b = ifm / bc; libxsmm_blasint ifm2b = ifm % bc; float val = (LIBXSMM_VLA_ACCESS(4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc) > FLT_MIN) ? LIBXSMM_VLA_ACCESS(4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc) : FLT_MIN; *loss += LIBXSMM_LOGF(val); } } *loss = ((-1.0 f) * (*loss)) / cfg.N; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_vnni_reformat_exec(my_vnni_reformat_config cfg, libxsmm_bfloat16 * delin_act_ptr, libxsmm_bfloat16 * tr_delin_act_ptr, unsigned char *relu_ptr, int start_tid, int my_tid) { /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; libxsmm_blasint lpb = 2; const libxsmm_blasint bn_lp = bn / lpb; libxsmm_blasint mb1ifm1, mb1, ifm1; libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_param relu_params; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? 
(__mmask32 *) relu_ptr : NULL, nBlocksIFm, cfg.bn, cfg.bc / 32); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, tr_dinput, (libxsmm_bfloat16 *) tr_delin_act_ptr, nBlocksMB, bn_lp, bc, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16 *) delin_act_ptr, nBlocksIFm, bn, bc); /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; LIBXSMM_UNUSED(trans_param); LIBXSMM_UNUSED(tr_dinput_); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32); cfg.fused_relu_kernel(&relu_params); } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_smax_bwd_exec(my_smax_bwd_config cfg, libxsmm_bfloat16 * delin_act_ptr, const libxsmm_bfloat16 * out_act_ptr, const int *label_ptr, int start_tid, int my_tid, void *scratch) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N / cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C / cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0 f / cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could 
run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16 *poutput_bf16 = out_act_ptr; libxsmm_bfloat16 *pdinput_bf16 = delin_act_ptr; float *poutput_fp32 = (float *)scratch; float *pdinput_fp32 = ((float *)scratch) + (cfg.N * cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = n_thr_begin; i < n_thr_end; ++i) { img1 = i / bn; img2 = i % bn; /* set output to input and set compute max per image */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { if ((ifm1 * Bc) + ifm2 == (libxsmm_blasint) LIBXSMM_VLA_ACCESS(2, label, img1, img2, bn)) { LIBXSMM_VLA_ACCESS(4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc) = (LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, 
bc) - 1.0 f) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS(4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.f = pdinput_fp32[i]; pdinput_bf16[i] = in.i[1]; } libxsmm_barrier_wait(cfg.barrier, ltid); } void init_master_weights(my_opt_config cfg, float *master_wt_ptr, size_t size) { #if 0 if (0 /* && cfg.upd_N_hyperpartitions != 1 */ ) { /* TODO: add hyperpartitions (?) */ /* * Spread out weights in a blocked fasion since we partition the * MODEL dimenstion */ init_buffer_block_numa((libxsmm_bfloat16 *) master_wt_ptr, size / 2); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa((libxsmm_bfloat16 *) master_wt_ptr, size / 2); } #endif } void init_weights(my_fc_fwd_config cfg, libxsmm_bfloat16 * wt_ptr, size_t size) { if (cfg.fwd_M_hyperpartitions != 1) { /* * Spread out weights in a blocked fasion since we partition the * MODEL dimenstion */ init_buffer_block_numa(wt_ptr, size); } else { /* Init weights in a block fashion */ init_buffer_block_cyclic_numa(wt_ptr, size); } } void init_dweights(my_fc_bwd_config cfg, libxsmm_bfloat16 * dwt_ptr, size_t size) { if (cfg.upd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(dwt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(dwt_ptr, size); } } void init_acts(my_fc_fwd_config cfg, libxsmm_bfloat16 * act_ptr, size_t size) { if (cfg.fwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(act_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(act_ptr, size); } } void init_delacts(my_fc_bwd_config cfg, libxsmm_bfloat16 * delact_ptr, size_t size) { if (cfg.bwd_N_hyperpartitions != 1) { /* Spread out weights */ 
init_buffer_block_numa(delact_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(delact_ptr, size); } } int main(int argc, char *argv[]) { libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config *my_fc_fwd; my_fc_bwd_config *my_fc_bwd; my_opt_config *my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; my_vnni_reformat_config my_vnni_reformat; void *scratch = NULL; size_t scratch_size = 0; /* * some parameters we can overwrite via cli, default is some inner layer * of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: * elementwise fused, 3: relu and elementwise * fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.1 f; float loss_weight = 1.0 f; float loss = 0.0; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); char *env_threads_per_numa; if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 7; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channles buffer */ if (num_layers < 1) { printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int *)malloc((num_layers + 2) * sizeof(int)); for (j = 0; i < argc; ++i, ++j) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers + 1] = C[num_layers]; #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* Read env variables */ env_threads_per_numa = getenv("THREADS_PER_NUMA"); if (0 == env_threads_per_numa) { printf("please specify THREADS_PER_NUMA to a non-zero value!\n"); return -1; } else { threads_per_numa = atoi(env_threads_per_numa); } /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i) { if (i == 0) { act_size += (double)(MB * C[i] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB * C[i] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); } act_size += (double)(MB * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); fil_size += (double)(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i + 1], (double)(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); printf("SIZE Activations %i 
(%dx%d): %10.2f MiB\n", i + 1, MB, C[i + 1], (double)(MB * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); } act_size += (double)(MB * C[num_layers + 1] * sizeof(float)) / (1024.0 * 1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers + 1], (double)(MB * C[num_layers + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size); printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0 * fil_size); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0 * fil_size) + (2.0 * act_size)); /* allocate data */ act_libxsmm = (libxsmm_bfloat16 **) malloc((num_layers + 2) * sizeof(libxsmm_bfloat16 *)); delact_libxsmm = (libxsmm_bfloat16 **) malloc((num_layers + 1) * sizeof(libxsmm_bfloat16 *)); for (i = 0; i < num_layers + 2; ++i) { act_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(MB * C[i] * sizeof(libxsmm_bfloat16), 2097152); /* softmax has no incoming gradients */ if (i < num_layers + 1) { delact_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(MB * C[i] * sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float **)malloc(num_layers * sizeof(float *)); fil_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); delfil_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); for (i = 0; i < num_layers; ++i) { fil_master[i] = (float *)libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); delbias_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); 
for (i = 0; i < num_layers; ++i) { bias_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char **)malloc(num_layers * sizeof(unsigned char *)); for (i = 0; i < num_layers; ++i) { relumask_libxsmm[i] = (unsigned char *)libxsmm_aligned_malloc(MB * C[i + 1] * sizeof(unsigned char), 2097152); } label_libxsmm = (int *)libxsmm_aligned_malloc(MB * sizeof(int), 2097152); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); /* allocating handles */ my_fc_fwd = (my_fc_fwd_config *) malloc(num_layers * sizeof(my_fc_fwd_config)); my_fc_bwd = (my_fc_bwd_config *) malloc(num_layers * sizeof(my_fc_bwd_config)); my_opt = (my_opt_config *) malloc(num_layers * sizeof(my_opt_config)); /* setting up handles + scratch */ size_t max_bwd_scratch_size = 0, max_doutput_scratch_mark = 0; scratch_size = 0; /* setting up handles + scratch */ for (i = 0; i < num_layers; ++i) { /* * MNIST Specific where everywhere we use relu act except the last * layer */ if (i < num_layers - 1) { my_fuse = MY_ELTWISE_FUSE_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i + 1], (MB % bn == 0) ? bn : MB, (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? bk : C[i + 1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i + 1], (MB % bn == 0) ? bn : MB, (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? bk : C[i + 1], nThreads, my_fuse, lr); my_opt[i] = setup_my_opt(C[i], C[i + 1], (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? 
bk : C[i + 1], nThreads, lr); if (my_fc_bwd[i].scratch_size > 0 && my_fc_bwd[i].scratch_size > max_bwd_scratch_size) { max_bwd_scratch_size = my_fc_bwd[i].scratch_size; } if (my_fc_bwd[i].doutput_scratch_mark > 0 && my_fc_bwd[i].doutput_scratch_mark > max_doutput_scratch_mark) { max_doutput_scratch_mark = my_fc_bwd[i].doutput_scratch_mark; } /* let's allocate and bind scratch */ if (my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0) { size_t alloc_size = LIBXSMM_MAX(LIBXSMM_MAX(my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size); if (alloc_size > scratch_size) { scratch_size = alloc_size; } } } /* softmax+loss is treated as N+! layer */ my_smax_fwd = setup_my_smax_fwd(MB, C[num_layers + 1], (MB % bn == 0) ? bn : MB, (C[num_layers + 1] % bk == 0) ? bk : C[num_layers + 1], nThreads); my_smax_bwd = setup_my_smax_bwd(MB, C[num_layers + 1], (MB % bn == 0) ? bn : MB, (C[num_layers + 1] % bk == 0) ? bk : C[num_layers + 1], nThreads, loss_weight); my_vnni_reformat = setup_my_vnni_reformat(MB, C[num_layers], (MB % bn == 0) ? bn : MB, (C[num_layers] % bk == 0) ? 
bk : C[num_layers], nThreads, my_fuse); if (my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0) { size_t alloc_size = LIBXSMM_MAX(my_smax_fwd.scratch_size, my_smax_bwd.scratch_size); if (alloc_size > scratch_size) { scratch_size = alloc_size; } } scratch = libxsmm_aligned_scratch(scratch_size, 2097152); /* init data */ for (i = 0; i < num_layers + 2; ++i) { init_acts(my_fc_fwd[i], act_libxsmm[i], MB * C[i]); } for (i = 0; i < num_layers + 1; ++i) { init_delacts(my_fc_bwd[i], delact_libxsmm[i], MB * C[i]); } for (i = 0; i < num_layers; ++i) { /* init_master_weights(my_opt[i], fil_master[i], C[i]*C[i+1] ); */ my_init_buf(fil_master[i], C[i] * C[i + 1], 0, 0); libxsmm_rne_convert_fp32_bf16(fil_master[i], fil_libxsmm[i], C[i] * C[i + 1]); /* init_weights(my_fc_fwd[i], fil_libxsmm[i], C[i]*C[i+1]); */ init_dweights(my_fc_bwd[i], delfil_libxsmm[i], C[i] * C[i + 1]); } for (i = 0; i < num_layers; ++i) { my_init_buf_bf16(bias_libxsmm[i], C[i + 1], 0, 0); } for (i = 0; i < num_layers; ++i) { my_init_buf_bf16(delbias_libxsmm[i], C[i + 1], 0, 0); } zero_buf_int32(label_libxsmm, MB); /* Reading in the MNIST dataset */ int n_batches = NUM_TRAIN / MB, batch_id = 0; int n_epochs = iters, epoch_id = 0; libxsmm_bfloat16 *input_acts = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152); /* Read in input data */ char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte"; char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte"; char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte"; char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte"; load_mnist(train_image_path, train_label_path, test_image_path, test_label_path); /* Format the input layer in NCNC blocked format */ int _i, _j; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? 
(float)train_image[_i][_j] : (float)0.0; int batchid = _i / MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB * C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16(&val, cur_pos, 1); } } printf("###########################################\n"); printf("# Training MNIST with %d training samples #\n", n_batches * MB); printf("###########################################\n"); l_start = libxsmm_timer_tick(); { for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) { for (batch_id = 0; batch_id < n_batches; batch_id++) { for (i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec(my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i + 1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch); } my_smax_fwd_exec(my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers + 1], train_label + batch_id * MB, &loss, 0, tid, scratch); if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1)) { printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss); } my_smax_bwd_exec(my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers + 1], train_label + batch_id * MB, 0, tid, scratch); for (i = num_layers - 1; i > 0; --i) { my_fc_bwd_exec(my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i + 1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], (my_fc_bwd[i].fuse_relu_bwd > 0) ? 
relumask_libxsmm[i - 1] : relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, fil_master[i]); } my_fc_bwd_exec(my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0 + 1], delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, fil_master[0]); } } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for (i = num_layers - 1; i > 0; --i) { gflop += (6.0 * (double)MB * (double)C[i] * (double)C[i + 1] * (double)((double)n_epochs * (double)n_batches)) / (1000.0 * 1000.0 * 1000.0); } gflop += (4.0 * (double)MB * (double)C[0] * (double)C[1] * (double)((double)n_epochs * (double)n_batches)) / (1000.0 * 1000.0 * 1000.0); printf("GFLOP = %.5g\n", gflop / (double)((double)n_epochs * (double)n_batches)); printf("fp time = %.5g\n", ((double)(l_total / ((double)n_epochs * (double)n_batches)))); printf("GFLOPS = %.5g\n", gflop / l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB); for (i = 0; i < num_layers; ++i) { printf("%i,", C[i]); } printf("%f,%f\n", ((double)(l_total / ((double)n_epochs * (double)n_batches))), gflop / l_total); #ifdef TEST_ACCURACY /* Test accuracy */ n_batches = NUM_TEST / MB; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float)test_image[_i][_j] : 0.0; int batchid = _i / MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB * C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16(&val, cur_pos, 1); } } n_batches = NUM_TEST / MB; unsigned int hits = 0; unsigned int samples = 0; { for (batch_id = 0; batch_id < n_batches; batch_id++) { for (i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? 
input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec(my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i + 1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch); } my_smax_fwd_exec(my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers + 1], test_label + batch_id * MB, &loss, 0, tid, scratch); if (tid == 0) { for (_i = 0; _i < MB; _i++) { int label = *(test_label + batch_id * MB + _i); int max_id = 0; float max_val = 0.0; libxsmm_convert_bf16_f32(act_libxsmm[num_layers + 1] + _i * 10, &max_val, 1); /* Find predicted label */ for (_j = 1; _j < 10; _j++) { libxsmm_bfloat16 val = *(act_libxsmm[num_layers + 1] + _i * 10 + _j); float f32_val; libxsmm_convert_bf16_f32(&val, &f32_val, 1); if (f32_val > max_val) { max_id = _j; max_val = f32_val; } } /* Compare with true label */ if (max_id == label) { hits++; } samples++; } } } } printf("Accuracy is %f %% (%d test samples)\n", (1.0 * hits) / (1.0 * samples) * 100.0, samples); #endif /* deallocate data */ if (scratch != NULL) { libxsmm_free(scratch); } for (i = 0; i < num_layers; ++i) { if (i == 0) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i + 1]); libxsmm_free(delact_libxsmm[i + 1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); libxsmm_free(fil_master[i]); } libxsmm_free(act_libxsmm[num_layers + 1]); libxsmm_free(label_libxsmm); libxsmm_free(input_acts); free(my_opt); free(my_fc_fwd); free(my_fc_bwd); free(act_libxsmm); free(delact_libxsmm); free(fil_master); free(fil_libxsmm); free(delfil_libxsmm); free(bias_libxsmm); free(delbias_libxsmm); free(relumask_libxsmm); free(C); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
/******************************************************************************
 * Evangelos Georganas, Alexander Heinecke (Intel Corp.)
 ******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
/* include c-based dnn library */
#include "../common/dnn_common.h"
#include "../common/mnist.h"

/* Compile-time feature switches for this driver. */
#define TEST_ACCURACY
#define OVERWRITE_DOUTPUT_BWDUPD
/* #define FUSE_WT_TRANS_SGD */
/* #define FUSE_ACT_TRANS_FWD */
/* #define FUSE_DACT_TRANS_BWD */
#define PRIVATE_WT_TRANS
#define PRIVATE_ACT_TRANS
#define PRIVATE_DACT_TRANS
#define FUSE_SGD_IN_BWD

/* Load 16 bf16 values and widen to fp32 by shifting them into the upper
 * 16 bits of each 32-bit lane (bf16 is the high half of an fp32). */
#define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16))
/* Round-convert 16 fp32 values to bf16 and store them. */
#define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)LIBXSMM_INTRINSICS_MM512_CVT_FP32_BF16((B)))

/* Threads per NUMA domain; populated in main() from the THREADS_PER_NUMA
 * environment variable before any of the NUMA-aware init helpers run. */
static int threads_per_numa = 0;

/* Fill an fp32 buffer: all ones (initOne), uniform positive randoms
 * (initPos), or small values centered near zero (default). The buffer is
 * zeroed first via zero_buf(). */
LIBXSMM_INLINE void my_init_buf(float *buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf(buf, size);
  for (i = 0; i < (int)size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64() / 10.0)));
  }
}

/* Same value distribution as my_init_buf, but stores bf16: each random is
 * produced in fp32 and truncated by keeping the high 16 bits (tmp.i[1]).
 * NOTE(review): tmp.i[1] assumes little-endian layout of libxsmm_bfloat16_hp. */
LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16 *buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf_bf16(buf, size);
  for (i = 0; i < (int)size; ++i) {
    libxsmm_bfloat16_hp tmp;
    tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64() / 10.0)));
    buf[i] = tmp.i[1];
  }
}

/* First-touch initialization of a bf16 buffer from thread ltid so pages land
 * on the caller's NUMA node. ft_mode 0: 4 KiB chunks assigned round-robin
 * across NUMA nodes; ft_mode 1: one contiguous block per NUMA node.
 * Only the portion owned by ltid's node is written. */
LIBXSMM_INLINE void init_buf_bf16_numa_aware(int threads, int ltid, int ft_mode, libxsmm_bfloat16 *buf, size_t size, int initPos, int initOne)
{
  int chunksize, chunks;
  int my_numa_node = ltid / threads_per_numa;
  int n_numa_nodes = threads / threads_per_numa;
  int l = 0;
  if (ft_mode == 0) {
    /* Mode 0: block-cyclic assignment to NUMA nodes */
    int bufsize = size * 2; /* bytes: bf16 elements are 2 bytes wide */
    chunksize = 4096;
    chunks = (bufsize + chunksize - 1) / chunksize;
    for (l = 0; l < chunks; l++) {
      /* last chunk may be short */
      int _chunksize = (l < chunks - 1) ? chunksize : bufsize - (chunks - 1) * chunksize;
      if (l % n_numa_nodes == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16 *) buf + l * (chunksize / 2), _chunksize / 2, 0, 0);
      }
    }
  } else {
    /* Mode 1: block assignment to NUMA nodes (chunksize here is in elements) */
    chunks = n_numa_nodes;
    chunksize = (size + chunks - 1) / chunks;
    for (l = 0; l < chunks; l++) {
      int _chunksize = (l < chunks - 1) ? chunksize : size - (chunks - 1) * chunksize;
      if (l == my_numa_node) {
        my_init_buf_bf16((libxsmm_bfloat16 *) buf + l * chunksize, _chunksize, 0, 0);
      }
    }
  }
}

/* Parallel first-touch init, block mode: one representative thread per NUMA
 * domain (tid % threads_per_numa == 0) initializes its node's block. */
void init_buffer_block_numa(libxsmm_bfloat16 *buf, size_t size)
{
  int nThreads = omp_get_max_threads();
#if defined(_OPENMP)
#pragma omp parallel
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 1, buf, size, 0, 0);
    }
  }
}

/* Parallel first-touch init, block-cyclic mode (ft_mode 0); otherwise
 * identical to init_buffer_block_numa. */
void init_buffer_block_cyclic_numa(libxsmm_bfloat16 *buf, size_t size)
{
  int nThreads = omp_get_max_threads();
#if defined(_OPENMP)
#pragma omp parallel
#endif
  {
#if defined(_OPENMP)
    const int tid = omp_get_thread_num();
#else
    const int tid = 0;
#endif
    if (tid % threads_per_numa == 0) {
      init_buf_bf16_numa_aware(nThreads, tid, 0, buf, size, 0, 0);
    }
  }
}

#if 0
/* Dead code kept for reference: repack a KCCK-blocked fp32 weight tensor
 * into KCCK-VNNI layout (pairs of c rows interleaved). */
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk)
{
  int k1, k2, c1, c2;
  int kBlocks = K / bk;
  int cBlocks = C / bc;
  LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk);
  LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc / 2, bk, 2);
  for (k1 = 0; k1 < kBlocks; k1++) {
    for (c1 = 0; c1 < cBlocks; c1++) {
      for (c2 = 0; c2 < bc; c2++) {
        for (k2 = 0; k2 < bk; k2++) {
          LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2 / 2, k2, c2 % 2, cBlocks, bc / 2, bk, 2) =
            LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
        }
      }
    }
  }
}
#endif

/* Elementwise fusion selector for the FC layers; BIAS and RELU are bit flags
 * and may be combined. */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* Pass selector; MY_PASS_BWD is the union of BWD_D and BWD_W. */
typedef enum my_pass {
  MY_PASS_FWD = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD = 6
} my_pass;

/* Configuration of the SGD optimizer step for one C x K weight tensor. */
typedef struct my_opt_config {
  libxsmm_blasint C;                /* input feature count */
  libxsmm_blasint K;                /* output feature count */
  libxsmm_blasint bc;               /* C blocking factor */
  libxsmm_blasint bk;               /* K blocking factor */
  libxsmm_blasint threads;
  libxsmm_blasint opt_2d_blocking;
  libxsmm_blasint opt_col_teams;
  libxsmm_blasint opt_row_teams;
  float lr;                         /* learning rate */
  size_t scratch_size;
  libxsmm_barrier *barrier;
} my_opt_config;

/* Softmax forward configuration (N x C activations, bn/bc blocked). */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier *barrier;
} my_smax_fwd_config;

/* Softmax + loss backward configuration. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight;                /* scaling applied to the loss gradient */
  libxsmm_barrier *barrier;
  my_eltwise_fuse fuse_type;
} my_smax_bwd_config;

/* Configuration for the VNNI-reformat (+ optional fused ReLU) helper. */
typedef struct my_vnni_reformat_config {
  libxsmm_blasint C;
  libxsmm_blasint N;
  libxsmm_blasint bc;
  libxsmm_blasint bn;
  libxsmm_blasint threads;
  libxsmm_barrier *barrier;
  my_eltwise_fuse fuse_type;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel;
  libxsmm_meltwfunction_unary fused_relu_kernel;
} my_vnni_reformat_config;

/* Fully-connected forward configuration: blocking, parallel decomposition,
 * and all JIT-dispatched BRGEMM / elementwise kernels used by the fwd pass. */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;                /* minibatch */
  libxsmm_blasint C;                /* input features */
  libxsmm_blasint K;                /* output features */
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;           /* blocking factor over C-chunks (BRGEMM split) */
  libxsmm_blasint fwd_2d_blocking;  /* 1: use a col_teams x row_teams 2D decomposition */
  libxsmm_blasint fwd_col_teams;
  libxsmm_blasint fwd_row_teams;
  libxsmm_blasint fwd_M_hyperpartitions;
  libxsmm_blasint fwd_N_hyperpartitions;
  size_t scratch_size;
  libxsmm_barrier *barrier;
  libxsmm_bsmmfunction fwd_config_kernel;          /* AMX tile-config kernel */
  libxsmm_bsmmfunction tilerelease_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;  /* beta=1 accumulate */
  libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2; /* beta=0 */
  libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;  /* beta=0, bf16 out */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4; /* + colbias */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5; /* + relu */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6; /* + sigmoid */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7; /* + colbias+relu */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8; /* + colbias+sigmoid */
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary fwd_zero_kernel;
  libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel;
  libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel;
} my_fc_fwd_config;

/* Fully-connected backward (bwd-d + upd) configuration; mirrors
 * my_fc_fwd_config with separate decompositions for the two sub-passes. */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf;
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint bwd_M_hyperpartitions;
  libxsmm_blasint bwd_N_hyperpartitions;
  libxsmm_blasint upd_bf;
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint upd_M_hyperpartitions;
  libxsmm_blasint upd_N_hyperpartitions;
  libxsmm_blasint ifm_subtasks;     /* subtasking inside a bc block (upd) */
  libxsmm_blasint ofm_subtasks;     /* subtasking inside a bk block (upd) */
  libxsmm_blasint fuse_relu_bwd;    /* 1: ReLU backward fused into bwd-d GEMM */
  size_t bwd_private_tr_wt_scratch_mark;   /* offsets into the shared scratch */
  size_t upd_private_tr_act_scratch_mark;
  size_t upd_private_tr_dact_scratch_mark;
  size_t scratch_size;
  size_t doutput_scratch_mark;
  libxsmm_barrier *barrier;
  libxsmm_bsmmfunction bwd_config_kernel;
  libxsmm_bsmmfunction upd_config_kernel;
  libxsmm_bsmmfunction tilerelease_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;   /* beta=1 */
  libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;  /* beta=0 */
  libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;   /* beta=0, bf16 out */
  libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_bwd5; /* + fused relu-bwd */
  libxsmm_meltwfunction_unary bwd_fused_relu_kernel;
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd;   /* beta=1 */
  libxsmm_bsmmfunction_reducebatch_strd gemm_upd2;  /* beta=0 */
  libxsmm_bmmfunction_reducebatch_strd gemm_upd3;   /* beta=0, VNNI-C output */
  libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary upd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary bwd_relu_kernel;
  libxsmm_meltwfunction_unary bwd_zero_kernel;
  libxsmm_meltwfunction_unary upd_zero_kernel;
  libxsmm_meltwfunction_unary delbias_reduce_kernel;
  libxsmm_meltwfunction_unary vnni_to_vnniT_kernel;
  libxsmm_meltwfunction_unary norm_to_normT_kernel;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel;
  libxsmm_meltwfunction_unary norm_to_vnni_kernel_wt;
  float lr;
} my_fc_bwd_config;

/* Build a VNNI-reformat handle: records the blocking, creates the thread
 * barrier, and JITs the bitmask-driven inverse-ReLU TPP used when ReLU is
 * fused. Exits the process if JIT dispatch fails. */
my_vnni_reformat_config setup_my_vnni_reformat(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, my_eltwise_fuse fuse_type)
{
  my_vnni_reformat_config res;
  libxsmm_blasint ld = bc;
  res.N = N;
  res.C = C;
  res.bn = bn;
  res.bc = bc;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  res.fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ld, &ld, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV);
  if (res.fused_relu_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fused_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  return res;
}

/* Build the forward FC handle: choose the parallel decomposition from the
 * thread count, create the barrier, and JIT all BRGEMM and elementwise TPPs
 * (plain, bias/activation-fused, converts, zeroing). Exits on JIT failure.
 * The returned handle's scratch_size must be honored by the caller. */
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type)
{
  my_fc_fwd_config res;
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  libxsmm_blasint ld_zero = bk * bn;
  libxsmm_blasint ld_upconvert = K;
  float alpha = 1.0f;
  float beta = 1.0f;     /* accumulate variant */
  float zerobeta = 0.0f; /* overwrite variant */
  libxsmm_meltw_flags fusion_flags;
  int l_flags, l_tc_flags;
  int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N'));
  libxsmm_blasint unroll_hint;
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setup parallelization strategy: hand-tuned team shapes for 16/14/56
   * threads, 1D otherwise */
  res.fwd_M_hyperpartitions = 1;
  res.fwd_N_hyperpartitions = 1;
  if (threads == 16) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 8;
  } else if (threads == 14) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 7;
  } else if (threads == 56) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 14;
    res.fwd_M_hyperpartitions = 1;
    res.fwd_N_hyperpartitions = 4;
  } else {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 0;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 1;
  }
#if 0
  res.fwd_bf = atoi(getenv("FWD_BF"));
  res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
  res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS"));
  res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
#endif
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
  l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N'));
  unroll_hint = (res.C / res.bc) / res.fwd_bf;
  res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
  if (res.fwd_config_kernel == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* strided reduce-batch BRGEMM over C-blocks: strides are one bk x bc weight
   * block and one bc x bn input block (in bytes) */
  res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
  if (res.gemm_fwd == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if (res.gemm_fwd2 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
  if (res.gemm_fwd3 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n");
    exit(-1);
  }
  /* fused variants: colbias and/or activation applied on the GEMM output */
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C;
  res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if (res.gemm_fwd4 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C;
  res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if (res.gemm_fwd5 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C;
  res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if (res.gemm_fwd6 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C;
  res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if (res.gemm_fwd7 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n");
    exit(-1);
  }
  fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C;
  res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
  if (res.gemm_fwd8 == NULL) {
    fprintf(stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n");
    exit(-1);
  }
  /* Also JIT eltwise TPPs... */
  res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if (res.fwd_cvtfp32bf16_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU);
  if (res.fwd_cvtfp32bf16_relu_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID);
  if (res.fwd_sigmoid_cvtfp32bf16_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
  if (res.tilerelease_kernel == NULL) {
    fprintf(stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn * bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if (res.fwd_zero_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if (res.fwd_colbcast_bf16fp32_copy_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n");
    exit(-1);
  }
  res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if (res.fwd_copy_bf16fp32_kernel == NULL) {
    fprintf(stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed.
Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K)); return res; } my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type, float lr) { my_fc_bwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; libxsmm_blasint ld_zero_bwd = bc * bn; libxsmm_blasint ld_zero_upd = bk; libxsmm_blasint delbias_K = K; libxsmm_blasint delbias_N = N; float alpha = 1.0 f; float beta = 1.0 f; float zerobeta = 0.0 f; libxsmm_blasint updM; libxsmm_blasint updN; int l_flags, l_tc_flags; int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')); libxsmm_blasint unroll_hint; size_t size_bwd_scratch; size_t size_upd_scratch; libxsmm_blasint bbk; libxsmm_blasint bbc; libxsmm_blasint ldaT = bc; libxsmm_blasint ldb_orig = bc; libxsmm_meltw_flags fusion_flags_bwd; libxsmm_meltw_operation bwd_fused_op; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; res.fuse_relu_bwd = 0; res.lr = lr; /* setup parallelization strategy */ res.bwd_M_hyperpartitions = 1; res.upd_M_hyperpartitions = 1; res.bwd_N_hyperpartitions = 1; res.upd_N_hyperpartitions = 1; if (threads == 16) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 8; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 8; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 14) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_col_teams = 2; res.bwd_row_teams = 7; res.upd_bf = 1; res.upd_2d_blocking = 1; res.upd_col_teams = 2; res.upd_row_teams = 7; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else if (threads == 56) { res.bwd_bf = 
/* (continuation: the "1" completes the assignment begun on the previous line;
 * this branch selects a 2D BWD/UPD decomposition with 1x14 teams and
 * 1x4 hyperpartitions for the current thread count) */
1;
res.bwd_2d_blocking = 1;
res.bwd_col_teams = 1;
res.bwd_row_teams = 14;
res.bwd_M_hyperpartitions = 1;
res.bwd_N_hyperpartitions = 4;
res.upd_bf = 1;
res.upd_2d_blocking = 1;
res.upd_col_teams = 1;
res.upd_row_teams = 14;
res.upd_M_hyperpartitions = 1;
res.upd_N_hyperpartitions = 4;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
} else {
  /* fallback for any other thread count: flat 1D work decomposition */
  res.bwd_bf = 1;
  res.bwd_2d_blocking = 0;
  res.bwd_col_teams = 1;
  res.bwd_row_teams = 1;
  res.upd_bf = 1;
  res.upd_2d_blocking = 0;
  res.upd_col_teams = 1;
  res.upd_row_teams = 1;
  res.ifm_subtasks = 1;
  res.ofm_subtasks = 1;
}
/* per-subtask tile extents for the UPD pass: 2D blocking works on full
 * bk x bc tiles, otherwise tiles are split across ofm/ifm subtasks */
bbk = (res.upd_2d_blocking == 1) ? bk : bk / res.ofm_subtasks;
bbc = (res.upd_2d_blocking == 1) ? bc : bc / res.ifm_subtasks;
/* disabled debug override of the decomposition via environment variables */
#if 0
res.bwd_bf = atoi(getenv("BWD_BF"));
res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
res.upd_bf = atoi(getenv("UPD_BF"));
res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif
/* private (per-thread) transposes require a 2D decomposition; warn and fall
 * back to upfront/shared transposes otherwise */
if (res.bwd_2d_blocking != 1) {
  printf("Requested private wt transposes, but for the current # of threads the bwd decomposition is not 2D. Will perform upfront/shared wt transposes...\n");
}
if (res.upd_2d_blocking != 1) {
  printf("Requested private act transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared act transposes...\n");
}
if (res.upd_2d_blocking != 1) {
  printf("Requested private dact transposes, but for the current # of threads the upd decomposition is not 2D. Will perform upfront/shared dact transposes...\n");
}
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation */
/* BWD GEMM: bf16 batch-reduce GEMMs; tileconfig is kept external
 * (NO_RESET/NO_SETUP) so it can be programmed once per pass */
l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N'));
unroll_hint = (res.K / res.bk) / res.bwd_bf;
/* accumulating (beta) BWD BRGEMM, used when blocking over the K dimension */
res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL);
if (res.gemm_bwd == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
  exit(-1);
}
/* zero-beta variant (f32 output) */
res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if (res.gemm_bwd2 == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
  exit(-1);
}
/* zero-beta variant writing bf16 output directly */
res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if (res.gemm_bwd3 == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n");
  exit(-1);
}
/* kernel that only programs the tile configuration for the BWD shapes */
res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL);
if (res.bwd_config_kernel == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n");
  exit(-1);
}
/* Also JIT eltwise TPPs... */
/* f32 -> bf16 down-convert of the BWD accumulator */
res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if (res.bwd_cvtfp32bf16_kernel == NULL) {
  fprintf(stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n");
  exit(-1);
}
/* inverse ReLU: applies the stored sign bitmask to the incoming gradient */
res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV);
if (res.bwd_relu_kernel == NULL) {
  fprintf(stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n");
  exit(-1);
}
/* zero-initialization (XOR) of one bn*bc f32 tile */
res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn * bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if (res.bwd_zero_kernel == NULL) {
  fprintf(stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n");
  exit(-1);
}
/* JITing the transpose kernel (VNNI -> transposed-VNNI weight layout) */
res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT);
if (res.vnni_to_vnniT_kernel == NULL) {
  fprintf(stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n");
  exit(-1);
}
/* BWD BRGEMM with the inverse ReLU fused onto C (overwrites C in place) */
bwd_fused_op = LIBXSMM_MELTW_OPERATION_COLBIAS_ACT;
fusion_flags_bwd = LIBXSMM_MELTW_FLAG_ACT_RELU_BWD_OVERWRITE_C;
res.gemm_bwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bc, res.bn, res.bk, res.bk * res.bc * sizeof(libxsmm_bfloat16), res.bk * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL, bwd_fused_op, LIBXSMM_DATATYPE_BF16, fusion_flags_bwd, 0, 0, 0, 0);
if (res.gemm_bwd5 == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_bwd5 failed. Bailing...!\n");
  exit(-1);
}
/* ReLU-backward is fused into the BWD GEMM only with a 2D UPD decomposition
 * (otherwise doutput gets pre-processed before the GEMMs) */
if (((fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) && (res.upd_2d_blocking == 1)) {
  res.fuse_relu_bwd = 1;
}
res.bwd_fused_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV);
if (res.bwd_fused_relu_kernel == NULL) {
  fprintf(stderr, "JIT for TPP bwd_fused_relu_kernel failed. Bailing...!\n");
  exit(-1);
}
/* UPD GEMM: weight-gradient GEMMs, optionally split into ofm/ifm subtasks */
lda = res.bk;
ldb = res.bn;
ldc = res.bk;
updM = res.bk / res.ofm_subtasks;
updN = res.bc / res.ifm_subtasks;
l_flags = (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N')) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | (LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N'));
unroll_hint = (res.N / res.bn) / res.upd_bf;
/* accumulating (beta) UPD BRGEMM, used when blocking over the N dimension */
res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
if (res.gemm_upd == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
  exit(-1);
}
/* zero-beta variant (f32 output) */
res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if (res.gemm_upd2 == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
  exit(-1);
}
/* zero-beta variant emitting the bf16 filter gradient in VNNI layout */
l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C;
res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk * res.bn * sizeof(libxsmm_bfloat16), res.bc * res.bn * sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if (res.gemm_upd3 == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n");
  exit(-1);
}
/* tile-configuration-only kernel for the UPD shapes */
res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
if (res.upd_config_kernel == NULL) {
  fprintf(stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n");
  exit(-1);
}
/* kernel that releases/resets the tile configuration */
res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
if (res.tilerelease_kernel == NULL) {
  fprintf(stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
  exit(-1);
}
/* Also JIT eltwise TPPs... */
res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if (res.upd_cvtfp32bf16_kernel == NULL) {
  fprintf(stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n");
  exit(-1);
}
res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if (res.upd_zero_kernel == NULL) {
  fprintf(stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n");
  exit(-1);
}
/* column reduction of doutput (NCNC layout) into the bias gradient */
res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT);
if (res.delbias_reduce_kernel == NULL) {
  fprintf(stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n");
  exit(-1);
}
/* JITing the transpose kernels */
res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
if (res.norm_to_vnni_kernel == NULL) {
  fprintf(stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n");
  exit(-1);
}
res.norm_to_vnni_kernel_wt = libxsmm_dispatch_meltw_unary(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
if (res.norm_to_vnni_kernel_wt == NULL) {
  fprintf(stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n");
  exit(-1);
}
res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
if (res.norm_to_normT_kernel == NULL) {
  fprintf(stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n");
  exit(-1);
}
/* init scratch: one shared region sized for the larger of the BWD and UPD
 * needs, with byte-offset marks delimiting the private transpose areas */
size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
res.bwd_private_tr_wt_scratch_mark = size_bwd_scratch;
size_bwd_scratch += res.threads * res.bc * res.K * sizeof(libxsmm_bfloat16);
size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch);
res.upd_private_tr_dact_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bk * res.N * sizeof(libxsmm_bfloat16);
res.upd_private_tr_act_scratch_mark = res.scratch_size;
res.scratch_size += res.threads * res.bc * res.N * (((res.C / res.bc) + res.upd_col_teams - 1) / res.upd_col_teams) * sizeof(libxsmm_bfloat16);
return res;
}

/* Creates the optimizer (weight-update) handle: records geometry, learning
 * rate, a thread-count-dependent 2D decomposition, and a team barrier. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) {
  my_opt_config res;
  /* setting up some handle values */
  res.C = C;
  res.K = K;
res.bc = bc; res.bk = bk; res.threads = threads; if (threads == 16) { res.opt_2d_blocking = 1; res.opt_col_teams = 2; res.opt_row_teams = 8; } else if (threads == 14) { res.opt_2d_blocking = 1; res.opt_col_teams = 2; res.opt_row_teams = 7; } else { res.opt_2d_blocking = 0; res.opt_col_teams = 1; res.opt_row_teams = 1; } res.lr = lr; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) { my_smax_fwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = (sizeof(float) * res.C * res.N * 2);; return res; } my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) { my_smax_bwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; res.loss_weight = loss_weight; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = (sizeof(float) * res.C * res.N * 2); return res; } void my_fc_fwd_exec(my_fc_fwd_config cfg, const libxsmm_bfloat16 * wt_ptr, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * out_act_ptr, const libxsmm_bfloat16 * bias_ptr, unsigned char *relu_ptr, int start_tid, int my_tid, void *scratch) { const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint lpb = 2; const libxsmm_blasint bc_lp = cfg.bc / lpb; /* const libxsmm_blasint bc = cfg.bc; */ 
libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could be run in parallel */
  const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* loop variables */
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
  libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
  /* blocked views over the flat activation/weight buffers */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
  LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
  /* f32 accumulator view living in scratch (used when BF > 1) */
  LIBXSMM_VLA_DECL(4, float, output_f32, (float *)scratch, nBlocksOFm, bn, bk);
  libxsmm_meltw_gemm_param gemm_eltwise_params;
  /* per-thread f32 staging of the bias (only with bias fusion) */
  float *fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float *)scratch + ltid * cfg.K : NULL;
  LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16 *) bias_ptr : NULL, cfg.bk);
  /* ReLU sign bitmask: one bit per output element, 32 packed per __mmask32 */
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32 *) relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk / 32);
  libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
  libxsmm_meltw_unary_param eltwise_params_act;
  libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
  libxsmm_meltw_unary_param eltwise_params;
  libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
  libxsmm_meltw_unary_param copy_params;
  unsigned long long blocks = nBlocksIFm;
  libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
  /* pick the fused zero-beta BRGEMM variant matching the requested fusion */
  if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
  } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
  } else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
  } else {
    bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL;
  }
  /* blocking factor over the input-feature-block dimension */
  BF = cfg.fwd_bf;
  CB_BLOCKS = nBlocksIFm / BF;
  blocks = CB_BLOCKS;
  if (use_2d_blocking == 1) {
    /* map ltid onto a (col_teams x row_teams) grid inside its hyperpartition */
    int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksMB, hyperteam_id;
    col_teams = cfg.fwd_col_teams;
    row_teams = cfg.fwd_row_teams;
    hyperteam_id = ltid / (col_teams * row_teams);
    _nBlocksOFm = nBlocksOFm / cfg.fwd_M_hyperpartitions;
    _nBlocksMB = nBlocksMB / cfg.fwd_N_hyperpartitions;
    _ltid = ltid % (col_teams * row_teams);
    M_hyperpartition_id = hyperteam_id % cfg.fwd_M_hyperpartitions;
    N_hyperpartition_id = hyperteam_id / cfg.fwd_M_hyperpartitions;
    my_row_id = _ltid % row_teams;
    my_col_id = _ltid / row_teams;
    N_tasks_per_thread = (_nBlocksMB + col_teams - 1) / col_teams;
    M_tasks_per_thread = (_nBlocksOFm + row_teams - 1) / row_teams;
    my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksMB);
    my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksMB);
    my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksOFm);
    my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksOFm);
  }
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);
  /* program the tile configuration once for this pass */
  cfg.fwd_config_kernel(NULL, NULL, NULL);
  if (use_2d_blocking == 1) {
    if (BF > 1) {
      /* blocked over IFM: accumulate into f32 scratch, convert on last step */
      for (ifm1 = 0; ifm1 < BF; ++ifm1) {
        for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            if (ifm1 == 0) {
              /* first step: seed the accumulator with the bias or zeros */
              if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
                copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0, cfg.bk);
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
              } else {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                cfg.fwd_zero_kernel(&copy_params);
              }
            }
            cfg.gemm_fwd(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1 * CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1 * CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
            if (ifm1 == BF - 1) {
              /* last step: f32 -> bf16, optionally fused with ReLU + bitmask */
              if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
                eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
                eltwise_kernel_act(&eltwise_params_act);
              } else {
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1,
ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
                eltwise_kernel(&eltwise_params);
              }
            }
          }
        }
      }
    } else {
      /* single blocking step: use the fused zero-beta kernels directly */
      if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
        /* stage the whole bias once per thread as f32 */
        copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, 0, 0, cfg.bk);
        copy_params.out.primary = fp32_bias_scratch;
        cfg.fwd_copy_bf16fp32_kernel(&copy_params);
      }
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
              gemm_eltwise_params.bias_ptr = (float *)fp32_bias_scratch + ofm1 * cfg.bk;
            }
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
              gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
            }
            bf16_batchreduce_kernel_zerobeta_fused_eltwise(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
          } else {
            cfg.gemm_fwd3(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
          }
        }
      }
    }
  } else {
    /* 1D decomposition over the flat (mb1, ofm1) task space */
    if (BF > 1) {
      for (ifm1 = 0; ifm1 < BF; ++ifm1) {
        for (mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1) {
          mb1 = mb1ofm1 % nBlocksMB;
          ofm1 = mb1ofm1 / nBlocksMB;
          /* Initialize libxsmm_blasintermediate f32 tensor */
          if (ifm1 == 0) {
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
              copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0, cfg.bk);
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
            } else {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              cfg.fwd_zero_kernel(&copy_params);
            }
          }
          cfg.gemm_fwd(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1 * CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1 * CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          if (ifm1 == BF - 1) {
            /* last step: downconvert f32 accumulator to bf16 output */
            if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
              eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
              eltwise_kernel_act(&eltwise_params_act);
            } else {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      }
    } else {
      if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
        copy_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(2, bias, 0, 0, cfg.bk);
        copy_params.out.primary = fp32_bias_scratch;
        cfg.fwd_copy_bf16fp32_kernel(&copy_params);
      }
      for (mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1) {
        mb1 = mb1ofm1 % nBlocksMB;
        ofm1 = mb1ofm1 / nBlocksMB;
        if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
            gemm_eltwise_params.bias_ptr = (float *)fp32_bias_scratch + ofm1 * cfg.bk;
          }
          if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
            gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
          }
          bf16_batchreduce_kernel_zerobeta_fused_eltwise(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
        } else {
          cfg.gemm_fwd3(&LIBXSMM_VLA_ACCESS(5, filter, ofm1, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
        }
      }
    }
  }
  /* release the tile configuration and synchronize the team */
  cfg.tilerelease_kernel(NULL, NULL, NULL);
  libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* Executes the fully-connected backward pass: depending on `pass` computes
 * the input gradient (BWD_D), the weight gradient (BWD_W), or both; also
 * reduces the bias gradient and applies the stored ReLU mask when fused. */
void my_fc_bwd_exec(my_fc_bwd_config cfg, libxsmm_bfloat16 * wt_ptr, libxsmm_bfloat16 * din_act_ptr, const libxsmm_bfloat16 * dout_act_ptr, libxsmm_bfloat16 * dwt_ptr, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * dbias_ptr, const unsigned char *relu_ptr, my_pass pass, int start_tid, int my_tid, void *scratch, float *fil_master) {
  /* size variables, all const */
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint lpb = 2;  /* VNNI packing factor for bf16 */
  const libxsmm_blasint bc_lp = bc / lpb;
  const libxsmm_blasint bk_lp = bk / lpb;
  const libxsmm_blasint bn_lp = bn / lpb;
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
  libxsmm_blasint performed_doutput_transpose = 0;
  libxsmm_meltw_unary_param trans_param;
  unsigned int i;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const
libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
  const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint dbias_work = nBlocksOFm;
  /* compute chunk size */
  const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
  const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16 *) dbias_ptr : NULL, cfg.bk);
  /* the ReLU bitmask view spans IFM blocks when the ReLU application is
   * fused into the BWD GEMM (fuse_relu_bwd), OFM blocks otherwise */
  libxsmm_blasint ext_blocks = (cfg.fuse_relu_bwd == 1) ? nBlocksIFm : nBlocksOFm;
  libxsmm_blasint int_blocks = (cfg.fuse_relu_bwd == 1) ? cfg.bc : cfg.bk;
  LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32 *) relu_ptr : NULL, ext_blocks, cfg.bn, int_blocks / 32);
  /* note: const is cast away because the ReLU pre-pass rewrites doutput
   * in place */
  libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16 *) dout_act_ptr;
  /* with ReLU fusion the transposed doutput goes behind the doutput scratch
   * mark; otherwise it lives at the start of scratch */
  libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16 *) ((char *)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16 *) scratch;
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16 *) dout_act_ptr, nBlocksOFm, bn, bk);
  libxsmm_meltw_unary_param relu_params;
  libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel;
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
  LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb);
  libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel;
  libxsmm_meltwfunction_unary eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel;
  libxsmm_meltw_unary_param eltwise_params;
  libxsmm_meltw_unary_param copy_params;
  libxsmm_meltw_unary_param delbias_params;
  libxsmm_meltw_gemm_param eltwise_params_bwd;
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);
  /* program the tile configuration for the BWD shapes */
  cfg.bwd_config_kernel(NULL, NULL, NULL);
  if (cfg.upd_2d_blocking == 0) {
    /* Apply to doutput potential fusions */
    if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
      /* pre-pass: apply the inverse ReLU mask to doutput in place */
      for (mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1) {
        mb1 = mb1ofm1 / nBlocksOFm;
        ofm1 = mb1ofm1 % nBlocksOFm;
        relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
        relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
        relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk / 32);
        relu_kernel(&relu_params);
        /* If in UPD pass, also perform transpose of doutput */
        if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) {
          trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
          trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
          cfg.norm_to_vnni_kernel(&trans_param);
        }
      }
      if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) {
        /* remember that the UPD pass can reuse the transposed doutput */
        performed_doutput_transpose = 1;
      }
libxsmm_barrier_wait(cfg.barrier, ltid);
    }
    /* Accumulation of bias happens in f32 */
    if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) {
      for (ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1) {
        delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
        delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk);
        cfg.delbias_reduce_kernel(&delbias_params);
      }
      /* wait for eltwise to finish */
      libxsmm_barrier_wait(cfg.barrier, ltid);
    }
  }
  if ((pass & MY_PASS_BWD_D) == MY_PASS_BWD_D) {
    /* input-gradient pass: dinput = W^T * doutput */
    libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
    /* number of tasks that could be run in parallel */
    const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
    /* number of tasks for transpose that could be run in parallel */
    const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm;
    /* compute chunk size */
    const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
    const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;
    /* loop variables */
    libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0;
    libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
    /* blocked views: weights, input gradient, and transposed-weight scratch */
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16 *) din_act_ptr, nBlocksIFm, bn, bc);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16 *) scratch, nBlocksOFm, bk_lp, bc, lpb);
    /* f32 accumulator placed behind the bf16 transposed weights in scratch */
    float *temp_output = (float *)scratch + (cfg.C * cfg.K) / 2;
    LIBXSMM_VLA_DECL(4, float, dinput_f32, (float *)temp_output, nBlocksIFm, bn, bc);
    unsigned long long blocks = nBlocksOFm;
    libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
    /* blocking factor over the output-feature-block dimension */
    BF = cfg.bwd_bf;
    KB_BLOCKS = nBlocksOFm / BF;
    blocks = KB_BLOCKS;
    if (use_2d_blocking == 1) {
      /* map ltid onto a (col_teams x row_teams) grid in its hyperpartition */
      int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksIFm, _nBlocksMB, hyperteam_id;
      col_teams = cfg.bwd_col_teams;
      row_teams = cfg.bwd_row_teams;
      hyperteam_id = ltid / (col_teams * row_teams);
      _nBlocksIFm = nBlocksIFm / cfg.bwd_M_hyperpartitions;
      _nBlocksMB = nBlocksMB / cfg.bwd_N_hyperpartitions;
      _ltid = ltid % (col_teams * row_teams);
      M_hyperpartition_id = hyperteam_id % cfg.bwd_M_hyperpartitions;
      N_hyperpartition_id = hyperteam_id / cfg.bwd_M_hyperpartitions;
      my_row_id = _ltid % row_teams;
      my_col_id = _ltid / row_teams;
      N_tasks_per_thread = (_nBlocksMB + col_teams - 1) / col_teams;
      M_tasks_per_thread = (_nBlocksIFm + row_teams - 1) / row_teams;
      my_N_start = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksMB);
      my_N_end = N_hyperpartition_id * _nBlocksMB + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksMB);
      my_M_start = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksIFm);
      my_M_end = M_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksIFm);
    }
    /* transpose weight */
    if (cfg.bwd_2d_blocking == 0) {
      /* shared upfront transpose of the whole weight tensor */
      for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
        ofm1 = ifm1ofm1 / nBlocksIFm;
        ifm1 = ifm1ofm1 % nBlocksIFm;
        trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
        trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb);
        cfg.vnni_to_vnniT_kernel(&trans_param);
      }
      /* wait for transpose to finish */
      libxsmm_barrier_wait(cfg.barrier, ltid);
    }
    if (use_2d_blocking == 1) {
      /* per-thread private transposed-weight scratch */
      LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_filter_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.bwd_private_tr_wt_scratch_mark)) + ltid * bc * cfg.K, bk_lp, bc, lpb);
      if (BF > 1) {
        for (ofm1 = 0; ofm1 < BF; ++ofm1) {
          for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
            /* privately transpose the weight chunk needed for this step */
            for (ofm2 = ofm1 * KB_BLOCKS; ofm2 < (ofm1 + 1) * KB_BLOCKS; ofm2++) {
              trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
              trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb);
              cfg.vnni_to_vnniT_kernel(&trans_param);
            }
            for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
              /* Initialize libxsmm_blasintermediate f32 tensor */
              if (ofm1 == 0) {
                copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                cfg.bwd_zero_kernel(&copy_params);
              }
              cfg.gemm_bwd(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm1 * KB_BLOCKS, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1 * KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
              /*
               * downconvert libxsmm_blasintermediate f32
               * tensor to bf 16 and store to final C
               */
              if (ofm1 == BF - 1) {
                eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput,
mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                eltwise_kernel(&eltwise_params);
                /* optionally apply the fused inverse ReLU to dinput */
                if (cfg.fuse_relu_bwd > 0) {
                  relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                  relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
                  relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32);
                  cfg.bwd_fused_relu_kernel(&relu_params);
                }
              }
            }
          }
        }
      } else {
        for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
          /* privately transpose all OFM blocks of this weight column */
          for (ofm2 = 0; ofm2 < nBlocksOFm; ofm2++) {
            trans_param.in.primary = &LIBXSMM_VLA_ACCESS(5, filter, ofm2, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
            trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, ofm2, 0, 0, 0, bk_lp, bc, lpb);
            cfg.vnni_to_vnniT_kernel(&trans_param);
          }
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            if (cfg.fuse_relu_bwd > 0) {
              /* BWD GEMM with the inverse ReLU fused onto the output */
              eltwise_params_bwd.relu_bitmask_bwd = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32);
              cfg.gemm_bwd5(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks, &eltwise_params_bwd);
            } else {
              cfg.gemm_bwd3(&LIBXSMM_VLA_ACCESS(4, tmp_filter_tr, 0, 0, 0, 0, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
            }
          }
        }
      }
    } else {
      /* 1D decomposition over the flat (mb1, ifm1) task space */
      if (BF > 1) {
        for (ofm1 = 0; ofm1 < BF; ++ofm1) {
          for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) {
            mb1 = mb1ifm1 % nBlocksMB;
            ifm1 = mb1ifm1 / nBlocksMB;
            /* Initialize libxsmm_blasintermediate f32 tensor */
            if (ofm1 == 0) {
              copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              cfg.bwd_zero_kernel(&copy_params);
            }
            cfg.gemm_bwd(&LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1 * KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1 * KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
            /*
             * downconvert libxsmm_blasintermediate f32 tensor to
             * bf 16 and store to final C
             */
            if (ofm1 == BF - 1) {
              eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
              eltwise_kernel(&eltwise_params);
            }
          }
        }
      } else {
        for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) {
          mb1 = mb1ifm1 % nBlocksMB;
          ifm1 = mb1ifm1 / nBlocksMB;
          cfg.gemm_bwd3(&LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
        }
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  if (cfg.upd_2d_blocking == 1) {
    /* Accumulation of bias happens in f32 */
    if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) {
      for (ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1) {
        delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
        delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk);
        cfg.delbias_reduce_kernel(&delbias_params);
      }
    }
  }
  if ((pass & MY_PASS_BWD_W) == MY_PASS_BWD_W) {
    /* weight-gradient pass: dfilter = doutput^T * input */
    /* number of tasks that could be run in parallel */
    const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
    const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
    const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk / ofm_subtasks;
    const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc / ifm_subtasks;
    const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
    const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
    const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;
    /* 2D blocking parameters */
    libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
    libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
    libxsmm_blasint BF = cfg.upd_bf;
    /* loop variables */
    libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, mb3 = 0, bfn = 0, mb1ifm1 = 0;
    /* Batch reduce related variables */
    unsigned long long blocks = nBlocksMB / BF;
    LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16 *) in_act_ptr, nBlocksIFm, bn, bc);
    LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16 *) dwt_ptr, nBlocksIFm, bc_lp, bk, lpb);
    /*
     * Set up tensors for transposing/scratch before vnni reformatting
     * dfilter
     */
    libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16 *) ((libxsmm_bfloat16 *) scratch + cfg.N * cfg.K);
    float *dfilter_f32_ptr = (float *)((libxsmm_bfloat16 *) tr_inp_ptr + cfg.N * cfg.C);
#ifndef BYPASS_SGD
    /* per-thread bf16 staging tile for the SGD weight update */
    libxsmm_bfloat16 *dfilter_scratch = (libxsmm_bfloat16 *) ((float *)dfilter_f32_ptr + cfg.C * cfg.K) + ltid * bc * bk;
#endif
    LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16 *) tr_inp_ptr, nBlocksMB, bc, bn);
    LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float *)dfilter_f32_ptr, nBlocksIFm, bc, bk);
#ifndef BYPASS_SGD
    LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dfilter_block, (libxsmm_bfloat16 *)
dfilter_scratch, bk); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float *)fil_master, nBlocksIFm, bc, bk); #endif const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm; const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1); const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work; const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm; const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1); const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work; const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? 
((ltid + 1) * tr_inp_chunksize) : tr_inp_work; if (use_2d_blocking == 1) { int _ltid, M_hyperpartition_id, N_hyperpartition_id, _nBlocksOFm, _nBlocksIFm, hyperteam_id; col_teams = cfg.upd_col_teams; row_teams = cfg.upd_row_teams; hyperteam_id = ltid / (col_teams * row_teams); _nBlocksOFm = nBlocksOFm / cfg.upd_M_hyperpartitions; _nBlocksIFm = nBlocksIFm / cfg.upd_N_hyperpartitions; _ltid = ltid % (col_teams * row_teams); M_hyperpartition_id = hyperteam_id % cfg.upd_M_hyperpartitions; N_hyperpartition_id = hyperteam_id / cfg.upd_M_hyperpartitions; my_row_id = _ltid % row_teams; my_col_id = _ltid / row_teams; N_tasks_per_thread = (_nBlocksIFm + col_teams - 1) / col_teams; M_tasks_per_thread = (_nBlocksOFm + row_teams - 1) / row_teams; my_N_start = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN(my_col_id * N_tasks_per_thread, _nBlocksIFm); my_N_end = N_hyperpartition_id * _nBlocksIFm + LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, _nBlocksIFm); my_M_start = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN(my_row_id * M_tasks_per_thread, _nBlocksOFm); my_M_end = M_hyperpartition_id * _nBlocksOFm + LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, _nBlocksOFm); } if (cfg.upd_2d_blocking == 0) { /* Required upfront tranposes */ for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; trans_param.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if (cfg.upd_2d_blocking == 0) { if (performed_doutput_transpose == 0) { for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) { mb1 = mb1ofm1 % nBlocksMB; ofm1 = mb1ofm1 / nBlocksMB; trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, 
bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } } libxsmm_barrier_wait(cfg.barrier, ltid); } if (use_2d_blocking == 1) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_input_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.upd_private_tr_act_scratch_mark)) + ltid * bc * cfg.N * N_tasks_per_thread, nBlocksMB, bc, bn); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, tmp_doutput_tr, ((libxsmm_bfloat16 *) ((char *)scratch + cfg.upd_private_tr_dact_scratch_mark)) + ltid * bk * cfg.N, bn_lp, bk, lpb); ifm2 = 0; ofm2 = 0; if (BF == 1) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = 0; mb3 < nBlocksMB; mb3++) { trans_param.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } if ((bc % 16 == 0) && (bk % 16 == 0)) { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 
0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } } else { cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, 0, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, 0, 0, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk), &blocks); trans_param.in.primary = &LIBXSMM_VLA_ACCESS(2, dfilter_block, 0, 0, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); cfg.norm_to_vnni_kernel_wt(&trans_param); { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } } } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { /* Transpose output block */ for (mb3 = bfn * blocks; mb3 < (bfn + 1) * blocks; mb3++) { trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb3, ofm1, 0, 0, nBlocksOFm, bn, bk); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, mb3, 0, 0, 0, bn_lp, bk, lpb); cfg.norm_to_vnni_kernel(&trans_param); } for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { /* Transpose input block */ if (ofm1 == my_M_start) { for (mb3 = bfn * blocks; mb3 < (bfn + 1) * blocks; mb3++) { trans_param.in.primary = (void 
*)&LIBXSMM_VLA_ACCESS(4, input, mb3, ifm1, 0, 0, nBlocksIFm, bn, bc); trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, mb3, 0, 0, nBlocksMB, bc, bn); cfg.norm_to_normT_kernel(&trans_param); } } /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(4, tmp_doutput_tr, bfn * blocks, 0, 0, 0, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, tmp_input_tr, ifm1 - my_N_start, bfn * blocks, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk), &blocks); /* Downconvert result to BF16 and vnni format */ if (bfn == BF - 1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); cfg.norm_to_vnni_kernel_wt(&trans_param); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } #endif } } } } } } else { if (BF == 1) { for (ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; 
++ifm1ofm1) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2 * bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2 * bbc) / lpb, ofm2 * bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks); #ifndef BYPASS_SGD { __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } #endif } } else { for (bfn = 0; bfn < BF; bfn++) { for (ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk); cfg.upd_zero_kernel(&copy_params); } cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn * blocks, 0, ofm2 * bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn * blocks, ifm2 * bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk), &blocks); /* 
Downconvert result to BF16 and vnni format */ if (bfn == BF - 1) { LIBXSMM_ALIGNED(libxsmm_bfloat16 tmp_buf[bc][bk], 64); eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2 * bbc, ofm2 * bbk, nBlocksIFm, bc, bk); eltwise_params.out.primary = tmp_buf; trans_param.in.primary = tmp_buf; trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2 * bbc) / lpb, ofm2 * bbk, 0, nBlocksIFm, bc_lp, bk, lpb); eltwise_kernel2(&eltwise_params); cfg.norm_to_vnni_kernel_wt(&trans_param);
#ifndef BYPASS_SGD
/* Fused SGD step on this filter block: new_w = master_w (f32) - lr * dw (bf16).
 * The result is stored to BOTH the f32 master weights and the bf16 working
 * weights, 16 floats per iteration.
 * NOTE(review): assumes bc*bk is a multiple of 16 -- confirm with block sizes. */
{ __m512 vlr = _mm512_set1_ps(cfg.lr); libxsmm_bfloat16 *dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); libxsmm_bfloat16 *wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); float *wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc * bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } }
#endif
} } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } cfg.tilerelease_kernel(NULL, NULL, NULL); }
/*
 * my_opt_exec: plain SGD optimizer step for one layer's weights.
 * Computes master_wt (f32) -= cfg.lr * delwt (bf16) and mirrors the updated
 * value into the bf16 working weights wt_ptr. Work is split across threads
 * either over the (ofm1, ifm1) filter-block grid (cfg.opt_2d_blocking == 1)
 * or flat over all C*K elements. The AVX512BW path processes 16 floats per
 * iteration with a scalar bf16<->f32 tail loop; the portable fallback is
 * fully scalar via libxsmm_bfloat16_hp. scratch is unused in this function.
 */
void my_opt_exec(my_opt_config cfg, libxsmm_bfloat16 * wt_ptr, float *master_wt_ptr, const libxsmm_bfloat16 * delwt_ptr, int start_tid, int my_tid, void *scratch) { /* loop counters */ libxsmm_blasint i; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bk = cfg.bk; const libxsmm_blasint bc = cfg.bc; libxsmm_blasint lpb = 2; const libxsmm_blasint bc_lp = bc / lpb; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; /* number of tasks that could run in parallel for the filters */ const libxsmm_blasint work = cfg.C * cfg.K; /* compute chunk size */ const libxsmm_blasint
chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid);
#if defined(__AVX512BW__)
__m512 vlr = _mm512_set1_ps(cfg.lr); if (cfg.opt_2d_blocking == 1) { libxsmm_blasint ofm1, ifm1; libxsmm_blasint col_teams = cfg.opt_col_teams; libxsmm_blasint row_teams = cfg.opt_row_teams; libxsmm_blasint my_row_id = ltid % row_teams; libxsmm_blasint my_col_id = ltid / row_teams; libxsmm_blasint N_tasks_per_thread = (nBlocksIFm + col_teams - 1) / col_teams; libxsmm_blasint M_tasks_per_thread = (nBlocksOFm + row_teams - 1) / row_teams; libxsmm_blasint my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksIFm); libxsmm_blasint my_N_end = LIBXSMM_MIN((my_col_id + 1) * N_tasks_per_thread, nBlocksIFm); libxsmm_blasint my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm); libxsmm_blasint my_M_end = LIBXSMM_MIN((my_row_id + 1) * M_tasks_per_thread, nBlocksOFm); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16 *) delwt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter, (libxsmm_bfloat16 *) wt_ptr, nBlocksIFm, bc_lp, bk, lpb); LIBXSMM_VLA_DECL(4, float, master_filter, (float *)master_wt_ptr, nBlocksIFm, bc, bk); libxsmm_bfloat16 *wt_bf16, *dwt_bf16; float *wt_fp32; for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) { for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) { dwt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); wt_bf16 = (libxsmm_bfloat16 *) & LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb); wt_fp32 = (float *)&LIBXSMM_VLA_ACCESS(4, master_filter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk); for (i = 0; i < bc *
bk; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(wt_fp32 + i), _mm512_mul_ps(vlr, _mm512_load_fil((libxsmm_bfloat16 *) dwt_bf16 + i))); _mm512_store_fil(wt_bf16 + i, newfilter); _mm512_storeu_ps(wt_fp32 + i, newfilter); } } } } else { libxsmm_blasint iv = ((thr_end - thr_begin) / 16) * 16; /* count of iterations which are vectorizable */ for (i = thr_begin; i < thr_begin + iv; i += 16) { __m512 newfilter = _mm512_sub_ps(_mm512_loadu_ps(master_wt_ptr + i), _mm512_mul_ps(vlr, _mm512_load_fil(delwt_ptr + i))); _mm512_store_fil(wt_ptr + i, newfilter); _mm512_storeu_ps(master_wt_ptr + i, newfilter); } /* scalar remainder: widen bf16 grad to f32, update master, narrow back */ for (i = thr_begin + iv; i < thr_end; ++i) { libxsmm_bfloat16_hp t1, t2; t1.i[0] = 0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr * t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; } }
#else
/* portable scalar fallback when AVX512BW is not available */
for (i = thr_begin; i < thr_end; ++i) { libxsmm_bfloat16_hp t1, t2; t1.i[0] = 0; t1.i[1] = delwt_ptr[i]; master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr * t1.f); t2.f = master_wt_ptr[i]; wt_ptr[i] = t2.i[1]; }
#endif
libxsmm_barrier_wait(cfg.barrier, ltid); }
/*
 * my_smax_fwd_exec: softmax forward plus cross-entropy loss.
 * Upconverts bf16 activations to an f32 scratch copy, computes a numerically
 * shifted per-row softmax, accumulates the mean negative log-likelihood over
 * label_ptr on thread 0 only, then downconverts the result back to bf16.
 */
void my_smax_fwd_exec(my_smax_fwd_config cfg, const libxsmm_bfloat16 * in_act_ptr, libxsmm_bfloat16 * out_act_ptr, const int *label_ptr, float *loss, int start_tid, int my_tid, void *scratch) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N / cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C / cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ?
(ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; libxsmm_bfloat16 *poutput_bf16 = out_act_ptr; const libxsmm_bfloat16 *pinput_bf16 = in_act_ptr; float *poutput_fp32 = (float *)scratch; float *pinput_fp32 = ((float *)scratch) + (cfg.N * cfg.C); LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.i[0] = 0; in.i[1] = pinput_bf16[i]; pinput_fp32[i] = in.f; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = n_thr_begin; i < n_thr_end; ++i) { float max = FLT_MIN; float sum_of_exp = 0.0 f; img1 = i / bn; img2 = i % bn; /* set output to input and set compute max per image */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc); if (LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc) > max) { max = LIBXSMM_VLA_ACCESS(4, input, img1, ifm1, img2, ifm2, Bc, bn, bc); } } } /* sum exp over outputs */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = 
(float)exp((double)(LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) - max)); sum_of_exp += LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc); } } /* scale output */ sum_of_exp = 1.0 f / sum_of_exp; for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) * sum_of_exp; } } } libxsmm_barrier_wait(cfg.barrier, ltid); /* calculate loss single threaded */ if (ltid == 0) { (*loss) = 0.0 f; for (img1 = 0; img1 < Bn; ++img1) { for (img2 = 0; img2 < bn; ++img2) { libxsmm_blasint ifm = (libxsmm_blasint) LIBXSMM_VLA_ACCESS(2, label, img1, img2, bn); libxsmm_blasint ifm1b = ifm / bc; libxsmm_blasint ifm2b = ifm % bc; float val = (LIBXSMM_VLA_ACCESS(4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc) > FLT_MIN) ? LIBXSMM_VLA_ACCESS(4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc) : FLT_MIN; *loss += LIBXSMM_LOGF(val); } } *loss = ((-1.0 f) * (*loss)) / cfg.N; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.f = poutput_fp32[i]; poutput_bf16[i] = in.i[1]; } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_vnni_reformat_exec(my_vnni_reformat_config cfg, libxsmm_bfloat16 * delin_act_ptr, libxsmm_bfloat16 * tr_delin_act_ptr, unsigned char *relu_ptr, int start_tid, int my_tid) { /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; const libxsmm_blasint bn = cfg.bn; const libxsmm_blasint bc = cfg.bc; const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; libxsmm_blasint lpb = 2; const libxsmm_blasint bn_lp = bn / lpb; libxsmm_blasint mb1ifm1, mb1, ifm1; libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_param relu_params; LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? 
(__mmask32 *) relu_ptr : NULL, nBlocksIFm, cfg.bn, cfg.bc / 32); LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, tr_dinput, (libxsmm_bfloat16 *) tr_delin_act_ptr, nBlocksMB, bn_lp, bc, lpb); LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16 *) delin_act_ptr, nBlocksIFm, bn, bc); /* number of tasks that could be run in parallel */ const libxsmm_blasint work = nBlocksIFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; LIBXSMM_UNUSED(trans_param); LIBXSMM_UNUSED(tr_dinput_); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1) { mb1 = mb1ifm1 % nBlocksMB; ifm1 = mb1ifm1 / nBlocksMB; if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) { relu_params.in.primary = (void *)&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc); relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ifm1, 0, 0, nBlocksIFm, cfg.bn, cfg.bc / 32); cfg.fused_relu_kernel(&relu_params); } } libxsmm_barrier_wait(cfg.barrier, ltid); } void my_smax_bwd_exec(my_smax_bwd_config cfg, libxsmm_bfloat16 * delin_act_ptr, const libxsmm_bfloat16 * out_act_ptr, const int *label_ptr, int start_tid, int my_tid, void *scratch) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N / cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C / cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0 f / cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could 
run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint nc_work = Bn * bn * Bc * bc; /* compute chunk size */ const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work; const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work; const libxsmm_bfloat16 *poutput_bf16 = out_act_ptr; libxsmm_bfloat16 *pdinput_bf16 = delin_act_ptr; float *poutput_fp32 = (float *)scratch; float *pdinput_fp32 = ((float *)scratch) + (cfg.N * cfg.C); LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp out; out.i[0] = 0; out.i[1] = poutput_bf16[i]; poutput_fp32[i] = out.f; } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = n_thr_begin; i < n_thr_end; ++i) { img1 = i / bn; img2 = i % bn; /* set output to input and set compute max per image */ for (ifm1 = 0; ifm1 < Bc; ++ifm1) { for (ifm2 = 0; ifm2 < bc; ++ifm2) { if ((ifm1 * Bc) + ifm2 == (libxsmm_blasint) LIBXSMM_VLA_ACCESS(2, label, img1, img2, bn)) { LIBXSMM_VLA_ACCESS(4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc) = (LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, 
bc) - 1.0 f) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS(4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc) = LIBXSMM_VLA_ACCESS(4, output, img1, ifm1, img2, ifm2, Bc, bn, bc) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait(cfg.barrier, ltid); for (i = nc_thr_begin; i < nc_thr_end; ++i) { libxsmm_bfloat16_hp in; in.f = pdinput_fp32[i]; pdinput_bf16[i] = in.i[1]; } libxsmm_barrier_wait(cfg.barrier, ltid); } void init_master_weights(my_opt_config cfg, float *master_wt_ptr, size_t size) { #if 0 if (0 /* && cfg.upd_N_hyperpartitions != 1 */ ) { /* TODO: add hyperpartitions (?) */ /* * Spread out weights in a blocked fasion since we partition the * MODEL dimenstion */ init_buffer_block_numa((libxsmm_bfloat16 *) master_wt_ptr, size / 2); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa((libxsmm_bfloat16 *) master_wt_ptr, size / 2); } #endif } void init_weights(my_fc_fwd_config cfg, libxsmm_bfloat16 * wt_ptr, size_t size) { if (cfg.fwd_M_hyperpartitions != 1) { /* * Spread out weights in a blocked fasion since we partition the * MODEL dimenstion */ init_buffer_block_numa(wt_ptr, size); } else { /* Init weights in a block fashion */ init_buffer_block_cyclic_numa(wt_ptr, size); } } void init_dweights(my_fc_bwd_config cfg, libxsmm_bfloat16 * dwt_ptr, size_t size) { if (cfg.upd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(dwt_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(dwt_ptr, size); } } void init_acts(my_fc_fwd_config cfg, libxsmm_bfloat16 * act_ptr, size_t size) { if (cfg.fwd_N_hyperpartitions != 1) { /* Spread out weights */ init_buffer_block_numa(act_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(act_ptr, size); } } void init_delacts(my_fc_bwd_config cfg, libxsmm_bfloat16 * delact_ptr, size_t size) { if (cfg.bwd_N_hyperpartitions != 1) { /* Spread out weights */ 
init_buffer_block_numa(delact_ptr, size); } else { /* Init weights in a block-cyclic fashion */ init_buffer_block_cyclic_numa(delact_ptr, size); } } int main(int argc, char *argv[]) { libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm; libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm; float **fil_master; unsigned char **relumask_libxsmm; int *label_libxsmm; my_eltwise_fuse my_fuse; my_fc_fwd_config *my_fc_fwd; my_fc_bwd_config *my_fc_bwd; my_opt_config *my_opt; my_smax_fwd_config my_smax_fwd; my_smax_bwd_config my_smax_bwd; my_vnni_reformat_config my_vnni_reformat; void *scratch = NULL; size_t scratch_size = 0; /* * some parameters we can overwrite via cli, default is some inner layer * of overfeat */ int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: * elementwise fused, 3: relu and elementwise * fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double gflop = 0.0; int i, j; double act_size = 0.0; double fil_size = 0.0; float lr = 0.1 f; float loss_weight = 1.0 f; float loss = 0.0; libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff; libxsmm_matdiff_clear(&norms_fwd); libxsmm_matdiff_clear(&norms_bwd); libxsmm_matdiff_clear(&norms_upd); libxsmm_matdiff_clear(&diff); char *env_threads_per_numa; if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* reading new values from cli */ i = 1; num_layers = argc - 7; if (argc > i) iters = atoi(argv[i++]); if (argc > i) MB = atoi(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* allocate the number of channles buffer */ if (num_layers < 1) { printf("Usage: %s iters MB bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int *)malloc((num_layers + 2) * sizeof(int)); for (j = 0; i < argc; ++i, ++j) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers + 1] = C[num_layers]; #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif /* Read env variables */ env_threads_per_numa = getenv("THREADS_PER_NUMA"); if (0 == env_threads_per_numa) { printf("please specify THREADS_PER_NUMA to a non-zero value!\n"); return -1; } else { threads_per_numa = atoi(env_threads_per_numa); } /* print some summary */ printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < num_layers; ++i) { if (i == 0) { act_size += (double)(MB * C[i] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB * C[i] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); } act_size += (double)(MB * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); fil_size += (double)(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i + 1], (double)(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); printf("SIZE Activations %i 
(%dx%d): %10.2f MiB\n", i + 1, MB, C[i + 1], (double)(MB * C[i + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); } act_size += (double)(MB * C[num_layers + 1] * sizeof(float)) / (1024.0 * 1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers + 1], (double)(MB * C[num_layers + 1] * sizeof(libxsmm_bfloat16)) / (1024.0 * 1024.0)); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size); printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0 * fil_size); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size); printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0 * fil_size) + (2.0 * act_size)); /* allocate data */ act_libxsmm = (libxsmm_bfloat16 **) malloc((num_layers + 2) * sizeof(libxsmm_bfloat16 *)); delact_libxsmm = (libxsmm_bfloat16 **) malloc((num_layers + 1) * sizeof(libxsmm_bfloat16 *)); for (i = 0; i < num_layers + 2; ++i) { act_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(MB * C[i] * sizeof(libxsmm_bfloat16), 2097152); /* softmax has no incoming gradients */ if (i < num_layers + 1) { delact_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(MB * C[i] * sizeof(libxsmm_bfloat16), 2097152); } } fil_master = (float **)malloc(num_layers * sizeof(float *)); fil_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); delfil_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); for (i = 0; i < num_layers; ++i) { fil_master[i] = (float *)libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(float), 2097152); fil_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); delfil_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i] * C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); } bias_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); delbias_libxsmm = (libxsmm_bfloat16 **) malloc(num_layers * sizeof(libxsmm_bfloat16 *)); 
for (i = 0; i < num_layers; ++i) { bias_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); delbias_libxsmm[i] = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(C[i + 1] * sizeof(libxsmm_bfloat16), 2097152); } relumask_libxsmm = (unsigned char **)malloc(num_layers * sizeof(unsigned char *)); for (i = 0; i < num_layers; ++i) { relumask_libxsmm[i] = (unsigned char *)libxsmm_aligned_malloc(MB * C[i + 1] * sizeof(unsigned char), 2097152); } label_libxsmm = (int *)libxsmm_aligned_malloc(MB * sizeof(int), 2097152); printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); /* allocating handles */ my_fc_fwd = (my_fc_fwd_config *) malloc(num_layers * sizeof(my_fc_fwd_config)); my_fc_bwd = (my_fc_bwd_config *) malloc(num_layers * sizeof(my_fc_bwd_config)); my_opt = (my_opt_config *) malloc(num_layers * sizeof(my_opt_config)); /* setting up handles + scratch */ size_t max_bwd_scratch_size = 0, max_doutput_scratch_mark = 0; scratch_size = 0; /* setting up handles + scratch */ for (i = 0; i < num_layers; ++i) { /* * MNIST Specific where everywhere we use relu act except the last * layer */ if (i < num_layers - 1) { my_fuse = MY_ELTWISE_FUSE_RELU; } else { my_fuse = MY_ELTWISE_FUSE_NONE; } my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i + 1], (MB % bn == 0) ? bn : MB, (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? bk : C[i + 1], nThreads, my_fuse); my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i + 1], (MB % bn == 0) ? bn : MB, (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? bk : C[i + 1], nThreads, my_fuse, lr); my_opt[i] = setup_my_opt(C[i], C[i + 1], (C[i] % bc == 0) ? bc : C[i], (C[i + 1] % bk == 0) ? 
bk : C[i + 1], nThreads, lr); if (my_fc_bwd[i].scratch_size > 0 && my_fc_bwd[i].scratch_size > max_bwd_scratch_size) { max_bwd_scratch_size = my_fc_bwd[i].scratch_size; } if (my_fc_bwd[i].doutput_scratch_mark > 0 && my_fc_bwd[i].doutput_scratch_mark > max_doutput_scratch_mark) { max_doutput_scratch_mark = my_fc_bwd[i].doutput_scratch_mark; } /* let's allocate and bind scratch */ if (my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0) { size_t alloc_size = LIBXSMM_MAX(LIBXSMM_MAX(my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size); if (alloc_size > scratch_size) { scratch_size = alloc_size; } } } /* softmax+loss is treated as N+! layer */ my_smax_fwd = setup_my_smax_fwd(MB, C[num_layers + 1], (MB % bn == 0) ? bn : MB, (C[num_layers + 1] % bk == 0) ? bk : C[num_layers + 1], nThreads); my_smax_bwd = setup_my_smax_bwd(MB, C[num_layers + 1], (MB % bn == 0) ? bn : MB, (C[num_layers + 1] % bk == 0) ? bk : C[num_layers + 1], nThreads, loss_weight); my_vnni_reformat = setup_my_vnni_reformat(MB, C[num_layers], (MB % bn == 0) ? bn : MB, (C[num_layers] % bk == 0) ? 
bk : C[num_layers], nThreads, my_fuse); if (my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0) { size_t alloc_size = LIBXSMM_MAX(my_smax_fwd.scratch_size, my_smax_bwd.scratch_size); if (alloc_size > scratch_size) { scratch_size = alloc_size; } } scratch = libxsmm_aligned_scratch(scratch_size, 2097152); /* init data */ for (i = 0; i < num_layers + 2; ++i) { init_acts(my_fc_fwd[i], act_libxsmm[i], MB * C[i]); } for (i = 0; i < num_layers + 1; ++i) { init_delacts(my_fc_bwd[i], delact_libxsmm[i], MB * C[i]); } for (i = 0; i < num_layers; ++i) { /* init_master_weights(my_opt[i], fil_master[i], C[i]*C[i+1] ); */ my_init_buf(fil_master[i], C[i] * C[i + 1], 0, 0); libxsmm_rne_convert_fp32_bf16(fil_master[i], fil_libxsmm[i], C[i] * C[i + 1]); /* init_weights(my_fc_fwd[i], fil_libxsmm[i], C[i]*C[i+1]); */ init_dweights(my_fc_bwd[i], delfil_libxsmm[i], C[i] * C[i + 1]); } for (i = 0; i < num_layers; ++i) { my_init_buf_bf16(bias_libxsmm[i], C[i + 1], 0, 0); } for (i = 0; i < num_layers; ++i) { my_init_buf_bf16(delbias_libxsmm[i], C[i + 1], 0, 0); } zero_buf_int32(label_libxsmm, MB); /* Reading in the MNIST dataset */ int n_batches = NUM_TRAIN / MB, batch_id = 0; int n_epochs = iters, epoch_id = 0; libxsmm_bfloat16 *input_acts = (libxsmm_bfloat16 *) libxsmm_aligned_malloc(NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152); /* Read in input data */ char *train_image_path = "../mlpdriver/mnist_data/train-images.idx3-ubyte"; char *train_label_path = "../mlpdriver/mnist_data/train-labels.idx1-ubyte"; char *test_image_path = "../mlpdriver/mnist_data/t10k-images.idx3-ubyte"; char *test_label_path = "../mlpdriver/mnist_data/t10k-labels.idx1-ubyte"; load_mnist(train_image_path, train_label_path, test_image_path, test_label_path); /* Format the input layer in NCNC blocked format */ int _i, _j; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? 
(float)train_image[_i][_j] : (float)0.0; int batchid = _i / MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB * C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16(&val, cur_pos, 1); } } printf("###########################################\n"); printf("# Training MNIST with %d training samples #\n", n_batches * MB); printf("###########################################\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) #pragma omp parallel private(i,j,epoch_id,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) { for (batch_id = 0; batch_id < n_batches; batch_id++) { for (i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec(my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i + 1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch); } my_smax_fwd_exec(my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers + 1], train_label + batch_id * MB, &loss, 0, tid, scratch); if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1)) { printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss); } my_smax_bwd_exec(my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers + 1], train_label + batch_id * MB, 0, tid, scratch); for (i = num_layers - 1; i > 0; --i) { my_fc_bwd_exec(my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i + 1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], (my_fc_bwd[i].fuse_relu_bwd > 0) ? 
relumask_libxsmm[i - 1] : relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, fil_master[i]); } my_fc_bwd_exec(my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0 + 1], delfil_libxsmm[0], input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, fil_master[0]); } } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for (i = num_layers - 1; i > 0; --i) { gflop += (6.0 * (double)MB * (double)C[i] * (double)C[i + 1] * (double)((double)n_epochs * (double)n_batches)) / (1000.0 * 1000.0 * 1000.0); } gflop += (4.0 * (double)MB * (double)C[0] * (double)C[1] * (double)((double)n_epochs * (double)n_batches)) / (1000.0 * 1000.0 * 1000.0); printf("GFLOP = %.5g\n", gflop / (double)((double)n_epochs * (double)n_batches)); printf("fp time = %.5g\n", ((double)(l_total / ((double)n_epochs * (double)n_batches)))); printf("GFLOPS = %.5g\n", gflop / l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB); for (i = 0; i < num_layers; ++i) { printf("%i,", C[i]); } printf("%f,%f\n", ((double)(l_total / ((double)n_epochs * (double)n_batches))), gflop / l_total); #ifdef TEST_ACCURACY /* Test accuracy */ n_batches = NUM_TEST / MB; for (_i = 0; _i < n_batches * MB; _i++) { for (_j = 0; _j < C[0]; _j++) { float val = (_j < 784) ? (float)test_image[_i][_j] : 0.0; int batchid = _i / MB; int mb = _i % MB; int _bn = (MB % bn == 0) ? bn : MB; int _bc = (C[0] % bc == 0) ? 
bc : C[0]; libxsmm_bfloat16 *cur_pos = input_acts + batchid * MB * C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc); libxsmm_rne_convert_fp32_bf16(&val, cur_pos, 1); } } n_batches = NUM_TEST / MB; unsigned int hits = 0; unsigned int samples = 0; #if defined(_OPENMP) #pragma omp parallel private(i,j,batch_id) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (batch_id = 0; batch_id < n_batches; batch_id++) { for (i = 0; i < num_layers; ++i) { libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i]; my_fc_fwd_exec(my_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i + 1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch); } my_smax_fwd_exec(my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers + 1], test_label + batch_id * MB, &loss, 0, tid, scratch); if (tid == 0) { for (_i = 0; _i < MB; _i++) { int label = *(test_label + batch_id * MB + _i); int max_id = 0; float max_val = 0.0; libxsmm_convert_bf16_f32(act_libxsmm[num_layers + 1] + _i * 10, &max_val, 1); /* Find predicted label */ for (_j = 1; _j < 10; _j++) { libxsmm_bfloat16 val = *(act_libxsmm[num_layers + 1] + _i * 10 + _j); float f32_val; libxsmm_convert_bf16_f32(&val, &f32_val, 1); if (f32_val > max_val) { max_id = _j; max_val = f32_val; } } /* Compare with true label */ if (max_id == label) { hits++; } samples++; } } #pragma omp barrier } } printf("Accuracy is %f %% (%d test samples)\n", (1.0 * hits) / (1.0 * samples) * 100.0, samples); #endif /* deallocate data */ if (scratch != NULL) { libxsmm_free(scratch); } for (i = 0; i < num_layers; ++i) { if (i == 0) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i + 1]); libxsmm_free(delact_libxsmm[i + 1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); 
libxsmm_free(fil_master[i]); } libxsmm_free(act_libxsmm[num_layers + 1]); libxsmm_free(label_libxsmm); libxsmm_free(input_acts); free(my_opt); free(my_fc_fwd); free(my_fc_bwd); free(act_libxsmm); free(delact_libxsmm); free(fil_master); free(fil_libxsmm); free(delfil_libxsmm); free(bias_libxsmm); free(delbias_libxsmm); free(relumask_libxsmm); free(C); /* some empty lines at the end */ printf("\n\n\n"); return 0; }
o10glogon_fmt_plug.c
/* * This software was written by JimF jfoug AT cox dot net * in 2016. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2016 JimF * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * * This is oracle O10g-logon format. NOTE, if the hashes came from a * Oracle 10g, and the hash data can be sniffed from network traffic * TNS records. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_o10glogon; #elif FMT_REGISTERS_H john_register_one(&fmt_o10glogon); #else #include <string.h> #include <openssl/des.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "aes.h" #include "md5.h" #include "unicode.h" #include "base64_convert.h" #include "memdbg.h" #define FORMAT_LABEL "o10glogon" #define FORMAT_NAME "Oracle 10g-logon protocol" #define FORMAT_TAG "$o10glogon$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define MAX_USERNAME_LEN 30 #define SALT_SIZE (sizeof(ora10g_salt)) #define SALT_ALIGN (sizeof(unsigned int)) #define CIPHERTEXT_LENGTH 16 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160) //#define DEBUG_ORACLE // // The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password // These can be found in sniffed network traffic. 
static struct fmt_tests tests[] = { {"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"}, {"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"}, {"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"}, {NULL} }; typedef struct ora10g_salt_t { int userlen, auth_pass_len; UTF16 user[MAX_USERNAME_LEN+1]; unsigned char auth_sesskey[32]; unsigned char auth_sesskey_c[32]; unsigned char auth_pass[80]; } ora10g_salt; static ora10g_salt *cur_salt; static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1]; static char (*plain_key)[PLAINTEXT_LENGTH + 1]; static int *cur_key_len; static int *cracked, any_cracked; static DES_key_schedule desschedule1; // key 0x0123456789abcdef static void init(struct fmt_main *self) { DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1); #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif cur_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key)); plain_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*plain_key)); cur_key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key_len)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(cur_key_len); MEM_FREE(plain_key); MEM_FREE(cur_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *cp; char tmp[32*5+1]; UTF16 
cur_key_mixedcase[MAX_USERNAME_LEN+2]; int len, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); if (!cp) return 0; // make sure username fits in MAX_USERNAME_LEN UTF16 if (cp-ciphertext > sizeof(tmp)-1) return 0; memcpy(tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp)); if (len < 0 || (len == 0 && cp-ciphertext)) { static int error_shown = 0; #ifdef HAVE_FUZZ if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK)) return 0; #endif if (!error_shown) fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label); error_shown = 1; return 0; } if (len > MAX_USERNAME_LEN) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; len = strlen(ciphertext); cp = strchr(ciphertext, '$'); if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra) return 0; return 1; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[MAX_HASH_LEN*5+1]; strnzcpy(out, ciphertext, MAX_HASH_LEN+1); enc_strupper(&out[FORMAT_TAG_LEN]); return out; } static void set_salt(void *salt) { cur_salt = (ora10g_salt *)salt; } static void oracle_set_key(char *key, int index) { UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1]; UTF16 *c; int key_length; strnzcpy(plain_key[index], key, sizeof(*plain_key)); // Can't use enc_to_utf16_be() because we need to do utf16_uc later key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key)); if (key_length < 0) key_length = strlen16(cur_key_mixedcase); // We convert and uppercase in one shot key_length = utf16_uc(cur_key[index], 
PLAINTEXT_LENGTH, cur_key_mixedcase, key_length); // we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase, // and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves. if (key_length < 0) key_length *= -1; cur_key_len[index] = key_length * sizeof(UTF16); // Now byte-swap to UTF16-BE c = cur_key[index]; while((*c = *c << 8 | *c >> 8)) c++; #ifdef DEBUG_ORACLE dump_stuff_msg("cur_key ", (unsigned char*)cur_key[index], cur_key_len[index]); #endif } static char *get_key(int index) { return plain_key[index]; } static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output) { unsigned char iv[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; AES_KEY key; AES_set_decrypt_key(aes_key_bytes, 128, &key); AES_cbc_encrypt(input, output, input_len, &key, iv, AES_DECRYPT); } static int terminate_ascii_string (char* ascii_string_not_terminated, int len) { int ascii_len = 0; unsigned char padding_byte; int pos; for (pos=0; ; pos++) { if ((ascii_string_not_terminated[pos] < 32) | (ascii_string_not_terminated[pos] > 126)) break; } ascii_len = pos; padding_byte = ascii_string_not_terminated[pos]; for (;pos<len; pos++) { if (ascii_string_not_terminated[pos] != padding_byte) return -1; } ascii_string_not_terminated[ascii_len] = 0; return ascii_len; } static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output) { unsigned char combined_sesskeys[16]; int i; MD5_CTX ctx; for (i=0;i<16;i++) combined_sesskeys[i] = server_sesskey[i] ^ client_sesskey[i]; MD5_Init (&ctx); MD5_Update (&ctx, combined_sesskeys,16); MD5_Final (output, &ctx); } static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted) { int passlen = 0; 
unsigned char aes_key_bytes[32]; unsigned char decrypted_server_sesskey[32]; unsigned char decrypted_client_sesskey[32]; unsigned char combined_sesskeys[16]; char decrypted_password[64]; memset (aes_key_bytes,0,sizeof(aes_key_bytes)); memcpy (aes_key_bytes,OracleHash,8); // Decrypt server and client session keys ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey); ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey); // Combine server and client session keys ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys); // Decrypt auth password with combined session key ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password); // terminate decrypted password with NULL passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16); if (passlen != -1) strncpy ((char*)decrypted, &decrypted_password[16], passlen); return passlen; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int idx = 0; if (any_cracked) { memset(cracked, 0, sizeof(*cracked) * count); any_cracked = 0; } #ifdef DEBUG_ORACLE dump_stuff_msg("cur_salt ", buf, cur_salt->userlen+key_length); #endif #ifdef _OPENMP #pragma omp parallel for for (idx = 0; idx < count; idx++) #endif { unsigned char buf[256], buf1[256]; unsigned int l; uint32_t iv[2]; DES_key_schedule desschedule2; l = cur_salt->userlen + cur_key_len[idx]; memcpy(buf, cur_salt->user, cur_salt->userlen); memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT); DES_set_key((DES_cblock *)iv, &desschedule2); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT); #ifdef DEBUG_ORACLE dump_stuff_msg(" iv (the hash key) ", (unsigned 
char*)&iv[0], 8); #endif ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf); if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx]))) { cracked[idx] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static void *get_salt(char *ciphertext) { static ora10g_salt salt; UTF8 tmp[MAX_USERNAME_LEN*5+1]; char *cp; memset(&salt, 0, sizeof(salt)); ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); strncpy((char*)tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext); if (salt.userlen < 0) salt.userlen = strlen16(salt.user); salt.userlen *= 2; base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0); cp = strchr(cp+1, '$'); base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0); cp = strchr(cp+1, '$') + 1; salt.auth_pass_len = strlen(cp)/2; base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0); return &salt; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *s = ((UTF16*)salt) + 1; unsigned int hash = 5381; while (*s) hash = ((hash << 5) + hash) ^ *s++; return hash & (SALT_HASH_SIZE - 1); } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int count) { return cracked[count]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_o10glogon = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, fmt_default_binary, 
get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, salt_hash, NULL, set_salt, oracle_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_o10glogon; #elif FMT_REGISTERS_H john_register_one(&fmt_o10glogon); #else #include <string.h> #include <openssl/des.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "aes.h" #include "md5.h" #include "unicode.h" #include "base64_convert.h" #include "memdbg.h" #define FORMAT_LABEL "o10glogon" #define FORMAT_NAME "Oracle 10g-logon protocol" #define FORMAT_TAG "$o10glogon$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define MAX_USERNAME_LEN 30 #define SALT_SIZE (sizeof(ora10g_salt)) #define SALT_ALIGN (sizeof(unsigned int)) #define CIPHERTEXT_LENGTH 16 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160) //#define DEBUG_ORACLE // // The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password // These can be found in sniffed network traffic. 
static struct fmt_tests tests[] = { {"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"}, {"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"}, {"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"}, {NULL} }; typedef struct ora10g_salt_t { int userlen, auth_pass_len; UTF16 user[MAX_USERNAME_LEN+1]; unsigned char auth_sesskey[32]; unsigned char auth_sesskey_c[32]; unsigned char auth_pass[80]; } ora10g_salt; static ora10g_salt *cur_salt; static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1]; static char (*plain_key)[PLAINTEXT_LENGTH + 1]; static int *cur_key_len; static int *cracked, any_cracked; static DES_key_schedule desschedule1; // key 0x0123456789abcdef static void init(struct fmt_main *self) { DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1); cur_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key)); plain_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*plain_key)); cur_key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key_len)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(cur_key_len); MEM_FREE(plain_key); MEM_FREE(cur_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *cp; char tmp[32*5+1]; UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2]; int len, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); if (!cp) 
return 0; // make sure username fits in MAX_USERNAME_LEN UTF16 if (cp-ciphertext > sizeof(tmp)-1) return 0; memcpy(tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp)); if (len < 0 || (len == 0 && cp-ciphertext)) { static int error_shown = 0; #ifdef HAVE_FUZZ if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK)) return 0; #endif if (!error_shown) fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label); error_shown = 1; return 0; } if (len > MAX_USERNAME_LEN) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; len = strlen(ciphertext); cp = strchr(ciphertext, '$'); if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra) return 0; return 1; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[MAX_HASH_LEN*5+1]; strnzcpy(out, ciphertext, MAX_HASH_LEN+1); enc_strupper(&out[FORMAT_TAG_LEN]); return out; } static void set_salt(void *salt) { cur_salt = (ora10g_salt *)salt; } static void oracle_set_key(char *key, int index) { UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1]; UTF16 *c; int key_length; strnzcpy(plain_key[index], key, sizeof(*plain_key)); // Can't use enc_to_utf16_be() because we need to do utf16_uc later key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key)); if (key_length < 0) key_length = strlen16(cur_key_mixedcase); // We convert and uppercase in one shot key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length); // we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase, // and we can not 'fix' our password. 
We simply have to 'not' properly decrypt this one, but protect ourselves. if (key_length < 0) key_length *= -1; cur_key_len[index] = key_length * sizeof(UTF16); // Now byte-swap to UTF16-BE c = cur_key[index]; while((*c = *c << 8 | *c >> 8)) c++; #ifdef DEBUG_ORACLE dump_stuff_msg("cur_key ", (unsigned char*)cur_key[index], cur_key_len[index]); #endif } static char *get_key(int index) { return plain_key[index]; } static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output) { unsigned char iv[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; AES_KEY key; AES_set_decrypt_key(aes_key_bytes, 128, &key); AES_cbc_encrypt(input, output, input_len, &key, iv, AES_DECRYPT); } static int terminate_ascii_string (char* ascii_string_not_terminated, int len) { int ascii_len = 0; unsigned char padding_byte; int pos; for (pos=0; ; pos++) { if ((ascii_string_not_terminated[pos] < 32) | (ascii_string_not_terminated[pos] > 126)) break; } ascii_len = pos; padding_byte = ascii_string_not_terminated[pos]; for (;pos<len; pos++) { if (ascii_string_not_terminated[pos] != padding_byte) return -1; } ascii_string_not_terminated[ascii_len] = 0; return ascii_len; } static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output) { unsigned char combined_sesskeys[16]; int i; MD5_CTX ctx; for (i=0;i<16;i++) combined_sesskeys[i] = server_sesskey[i] ^ client_sesskey[i]; MD5_Init (&ctx); MD5_Update (&ctx, combined_sesskeys,16); MD5_Final (output, &ctx); } static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted) { int passlen = 0; unsigned char aes_key_bytes[32]; unsigned char decrypted_server_sesskey[32]; unsigned char decrypted_client_sesskey[32]; unsigned char combined_sesskeys[16]; char decrypted_password[64]; memset 
(aes_key_bytes,0,sizeof(aes_key_bytes)); memcpy (aes_key_bytes,OracleHash,8); // Decrypt server and client session keys ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey); ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey); // Combine server and client session keys ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys); // Decrypt auth password with combined session key ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password); // terminate decrypted password with NULL passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16); if (passlen != -1) strncpy ((char*)decrypted, &decrypted_password[16], passlen); return passlen; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int idx = 0; if (any_cracked) { memset(cracked, 0, sizeof(*cracked) * count); any_cracked = 0; } #ifdef DEBUG_ORACLE dump_stuff_msg("cur_salt ", buf, cur_salt->userlen+key_length); #endif { unsigned char buf[256], buf1[256]; unsigned int l; uint32_t iv[2]; DES_key_schedule desschedule2; l = cur_salt->userlen + cur_key_len[idx]; memcpy(buf, cur_salt->user, cur_salt->userlen); memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT); DES_set_key((DES_cblock *)iv, &desschedule2); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT); #ifdef DEBUG_ORACLE dump_stuff_msg(" iv (the hash key) ", (unsigned char*)&iv[0], 8); #endif ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf); if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx]))) { cracked[idx] = 1; 
any_cracked |= 1; } } return count; } static void *get_salt(char *ciphertext) { static ora10g_salt salt; UTF8 tmp[MAX_USERNAME_LEN*5+1]; char *cp; memset(&salt, 0, sizeof(salt)); ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); strncpy((char*)tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext); if (salt.userlen < 0) salt.userlen = strlen16(salt.user); salt.userlen *= 2; base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0); cp = strchr(cp+1, '$'); base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0); cp = strchr(cp+1, '$') + 1; salt.auth_pass_len = strlen(cp)/2; base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0); return &salt; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *s = ((UTF16*)salt) + 1; unsigned int hash = 5381; while (*s) hash = ((hash << 5) + hash) ^ *s++; return hash & (SALT_HASH_SIZE - 1); } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int count) { return cracked[count]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_o10glogon = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, salt_hash, NULL, set_salt, oracle_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_o10glogon; #elif FMT_REGISTERS_H john_register_one(&fmt_o10glogon); #else #include <string.h> #include <openssl/des.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "aes.h" #include "md5.h" #include "unicode.h" #include "base64_convert.h" #include "memdbg.h" #define FORMAT_LABEL "o10glogon" #define FORMAT_NAME "Oracle 10g-logon protocol" #define FORMAT_TAG "$o10glogon$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define MAX_USERNAME_LEN 30 #define SALT_SIZE (sizeof(ora10g_salt)) #define SALT_ALIGN (sizeof(unsigned int)) #define CIPHERTEXT_LENGTH 16 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160) //#define DEBUG_ORACLE // // The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password // These can be found in sniffed network traffic. 
static struct fmt_tests tests[] = { {"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"}, {"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"}, {"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"}, {NULL} }; typedef struct ora10g_salt_t { int userlen, auth_pass_len; UTF16 user[MAX_USERNAME_LEN+1]; unsigned char auth_sesskey[32]; unsigned char auth_sesskey_c[32]; unsigned char auth_pass[80]; } ora10g_salt; static ora10g_salt *cur_salt; static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1]; static char (*plain_key)[PLAINTEXT_LENGTH + 1]; static int *cur_key_len; static int *cracked, any_cracked; static DES_key_schedule desschedule1; // key 0x0123456789abcdef static void init(struct fmt_main *self) { DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1); #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif cur_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key)); plain_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*plain_key)); cur_key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key_len)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(cur_key_len); MEM_FREE(plain_key); MEM_FREE(cur_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *cp; char tmp[32*5+1]; UTF16 
cur_key_mixedcase[MAX_USERNAME_LEN+2]; int len, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); if (!cp) return 0; // make sure username fits in MAX_USERNAME_LEN UTF16 if (cp-ciphertext > sizeof(tmp)-1) return 0; memcpy(tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp)); if (len < 0 || (len == 0 && cp-ciphertext)) { static int error_shown = 0; #ifdef HAVE_FUZZ if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK)) return 0; #endif if (!error_shown) fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label); error_shown = 1; return 0; } if (len > MAX_USERNAME_LEN) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; cp = strchr(ciphertext, '$'); if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64) return 0; ciphertext = cp+1; len = strlen(ciphertext); cp = strchr(ciphertext, '$'); if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra) return 0; return 1; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[MAX_HASH_LEN*5+1]; strnzcpy(out, ciphertext, MAX_HASH_LEN+1); enc_strupper(&out[FORMAT_TAG_LEN]); return out; } static void set_salt(void *salt) { cur_salt = (ora10g_salt *)salt; } static void oracle_set_key(char *key, int index) { UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1]; UTF16 *c; int key_length; strnzcpy(plain_key[index], key, sizeof(*plain_key)); // Can't use enc_to_utf16_be() because we need to do utf16_uc later key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key)); if (key_length < 0) key_length = strlen16(cur_key_mixedcase); // We convert and uppercase in one shot key_length = utf16_uc(cur_key[index], 
PLAINTEXT_LENGTH, cur_key_mixedcase, key_length); // we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase, // and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves. if (key_length < 0) key_length *= -1; cur_key_len[index] = key_length * sizeof(UTF16); // Now byte-swap to UTF16-BE c = cur_key[index]; while((*c = *c << 8 | *c >> 8)) c++; #ifdef DEBUG_ORACLE dump_stuff_msg("cur_key ", (unsigned char*)cur_key[index], cur_key_len[index]); #endif } static char *get_key(int index) { return plain_key[index]; } static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output) { unsigned char iv[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; AES_KEY key; AES_set_decrypt_key(aes_key_bytes, 128, &key); AES_cbc_encrypt(input, output, input_len, &key, iv, AES_DECRYPT); } static int terminate_ascii_string (char* ascii_string_not_terminated, int len) { int ascii_len = 0; unsigned char padding_byte; int pos; for (pos=0; ; pos++) { if ((ascii_string_not_terminated[pos] < 32) | (ascii_string_not_terminated[pos] > 126)) break; } ascii_len = pos; padding_byte = ascii_string_not_terminated[pos]; for (;pos<len; pos++) { if (ascii_string_not_terminated[pos] != padding_byte) return -1; } ascii_string_not_terminated[ascii_len] = 0; return ascii_len; } static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output) { unsigned char combined_sesskeys[16]; int i; MD5_CTX ctx; for (i=0;i<16;i++) combined_sesskeys[i] = server_sesskey[i] ^ client_sesskey[i]; MD5_Init (&ctx); MD5_Update (&ctx, combined_sesskeys,16); MD5_Final (output, &ctx); } static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted) { int passlen = 0; 
unsigned char aes_key_bytes[32]; unsigned char decrypted_server_sesskey[32]; unsigned char decrypted_client_sesskey[32]; unsigned char combined_sesskeys[16]; char decrypted_password[64]; memset (aes_key_bytes,0,sizeof(aes_key_bytes)); memcpy (aes_key_bytes,OracleHash,8); // Decrypt server and client session keys ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey); ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey); // Combine server and client session keys ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys); // Decrypt auth password with combined session key ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password); // terminate decrypted password with NULL passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16); if (passlen != -1) strncpy ((char*)decrypted, &decrypted_password[16], passlen); return passlen; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int idx = 0; if (any_cracked) { memset(cracked, 0, sizeof(*cracked) * count); any_cracked = 0; } #ifdef DEBUG_ORACLE dump_stuff_msg("cur_salt ", buf, cur_salt->userlen+key_length); #endif #ifdef _OPENMP #pragma omp parallel for for (idx = 0; idx < count; idx++) #endif { unsigned char buf[256], buf1[256]; unsigned int l; uint32_t iv[2]; DES_key_schedule desschedule2; l = cur_salt->userlen + cur_key_len[idx]; memcpy(buf, cur_salt->user, cur_salt->userlen); memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT); DES_set_key((DES_cblock *)iv, &desschedule2); iv[0] = iv[1] = 0; DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT); #ifdef DEBUG_ORACLE dump_stuff_msg(" iv (the hash key) ", (unsigned 
char*)&iv[0], 8); #endif ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf); if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx]))) { cracked[idx] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static void *get_salt(char *ciphertext) { static ora10g_salt salt; UTF8 tmp[MAX_USERNAME_LEN*5+1]; char *cp; memset(&salt, 0, sizeof(salt)); ciphertext += FORMAT_TAG_LEN; cp = strchr(ciphertext, '$'); strncpy((char*)tmp, ciphertext, cp-ciphertext); tmp[cp-ciphertext] = 0; salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext); if (salt.userlen < 0) salt.userlen = strlen16(salt.user); salt.userlen *= 2; base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0); cp = strchr(cp+1, '$'); base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0); cp = strchr(cp+1, '$') + 1; salt.auth_pass_len = strlen(cp)/2; base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0); return &salt; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *s = ((UTF16*)salt) + 1; unsigned int hash = 5381; while (*s) hash = ((hash << 5) + hash) ^ *s++; return hash & (SALT_HASH_SIZE - 1); } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int count) { return cracked[count]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_o10glogon = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, fmt_default_binary, 
get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, salt_hash, NULL, set_salt, oracle_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
convolution_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char* kptr = weight_data_int8.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, _w); _sum0 
= vaddw_s16(_sum0, vget_low_s16(_s0)); kptr += 8; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option & opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } //num_output for (int p = 0; p < outch; p++) { int *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char *kptr = weight_data_int8.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char *sptr = m.row < const signed char >(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); kptr 
+= 8; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option & opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } //num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char *kptr = weight_data_int8.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char *sptr = m.row < const signed char >(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, 
_w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); kptr += 8; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
convolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl); const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const 
Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl); vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl); _sum = vfmacc_vv_f32m2(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = (__fp16)sum; } outptr += outw; } } } static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const __fp16* bias_data_ptr = bias_data_fp16; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __fp16 sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w 
* packn; for (int k = 0; k < maxk; k++) { vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl); vfloat16m1_t _w = vle16_v_f16m1(kptr, vl); _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convolution_packnto1_fp16s_rvv(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_fp16, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float *bias_data_ptr = bias_data; //num_output for (int p = 0; p < outch; p++) { __fp16 *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat32m2_t _sum = vfmv_v_f_f32m2(0. 
f, vl); const __fp16 *kptr = weight_data_fp16.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16 *sptr = m.row < const __fp16 > (i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl); vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl); _sum = vfmacc_vv_f32m2(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = (__fp16) sum; } outptr += outw; } } } static void convolution_packnto1_fp16sa_rvv(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_fp16, const Mat & bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const __fp16 *bias_data_ptr = bias_data_fp16; //num_output for (int p = 0; p < outch; p++) { __fp16 *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __fp16 sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat16m1_t _sum = vfmv_v_f_f16m1(0. 
f, vl); const __fp16 *kptr = weight_data_fp16.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16 *sptr = m.row < const __fp16 > (i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl); vfloat16m1_t _w = vle16_v_f16m1(kptr, vl); _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static void convolution_packnto1_fp16s_rvv(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_fp16, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float *bias_data_ptr = bias_data; //num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16 *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat32m2_t _sum = vfmv_v_f_f32m2(0. 
f, vl); const __fp16 *kptr = weight_data_fp16.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16 *sptr = m.row < const __fp16 > (i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl); vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl); _sum = vfmacc_vv_f32m2(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = (__fp16) sum; } outptr += outw; } } } static void convolution_packnto1_fp16sa_rvv(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_fp16, const Mat & bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const __fp16 *bias_data_ptr = bias_data_fp16; //num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16 *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __fp16 sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat16m1_t _sum = vfmv_v_f_f16m1(0. 
f, vl); const __fp16 *kptr = weight_data_fp16.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16 *sptr = m.row < const __fp16 > (i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl); vfloat16m1_t _w = vle16_v_f16m1(kptr, vl); _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
/* ===== GB_binop__eq_int8.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_03__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int8) // A*D function (colscale): GB (_AxD__eq_int8) // D*A function (rowscale): GB (_DxB__eq_int8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int8) // C=scalar+B GB (_bind1st__eq_int8) // C=scalar+B' GB (_bind1st_tran__eq_int8) // C=A+scalar GB (_bind2nd__eq_int8) // C=A'+scalar GB (_bind2nd_tran__eq_int8) // C type: bool // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij 
= Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t 
*restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_03__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int8) // A*D function (colscale): GB (_AxD__eq_int8) // D*A function (rowscale): GB (_DxB__eq_int8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int8) // C=scalar+B GB (_bind1st__eq_int8) // C=scalar+B' GB (_bind1st_tran__eq_int8) // C=A+scalar GB (_bind2nd__eq_int8) // C=A'+scalar GB (_bind2nd_tran__eq_int8) // C type: bool // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij 
= Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t 
*restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; 
Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Each kernel below defines only the type/operator macros and pulls in a
// shared template via #include; the real loops live in those templates.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_int8)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__eq_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_int8)
// A*D function (colscale):         GB (_AxD__eq_int8)
// D*A function (rowscale):         GB (_DxB__eq_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_int8)
// C=scalar+B                       GB (_bind1st__eq_int8)
// C=scalar+B'                      GB (_bind1st_tran__eq_int8)
// C=A+scalar                       GB (_bind2nd__eq_int8)
// C=A'+scalar                      GB (_bind2nd_tran__eq_int8)

// C type:   bool
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (EQ is none of those, so this kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NOTE: the template body is compiled out (#if 0) for this operator;
// the function is a stub that reports success.
GrB_Info GB (_Cdense_accumB__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// NOTE: likewise compiled out (#if 0) for this operator.
GrB_Info GB (_Cdense_accumb__eq_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (EQ is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x == Bx [p]) in parallel for all entries in bitmap Bb.
GrB_Info GB (_bind1st__eq_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (x == aij) ;      \
}

GrB_Info GB (_bind1st_tran__eq_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (identical here since A and B types match)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (aij == y) ;      \
}

GrB_Info GB (_bind2nd_tran__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpu.c
/** * @file main.c * @brief This file contains the source code of the application to parallelise. * @details This application is a classic heat spread simulation. * @author Ludovic Capelli **/ #include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #include <inttypes.h> #include <math.h> #include <sched.h> #include <unistd.h> #include <string.h> #include "util.h" /** * @argv[0] Name of the program * @argv[1] path to the dataset to load **/ int main(int argc, char* argv[]) { (void)argc; (void)argv; MPI_Init(NULL, NULL); ///////////////////////////////////////////////////// // -- PREPARATION 1: COLLECT USEFUL INFORMATION -- // ///////////////////////////////////////////////////// // Ranks for convenience so that we don't throw raw values all over the code const int MASTER_PROCESS_RANK = 0; // The rank of the MPI process in charge of this instance int my_rank; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Number of MPI processes in total, commonly called "comm_size" for "communicator size". int comm_size; MPI_Comm_size(MPI_COMM_WORLD, &comm_size); /// Rank of the first MPI process const int FIRST_PROCESS_RANK = 0; /// Rank of the last MPI process const int LAST_PROCESS_RANK = comm_size - 1; // Rank of my up neighbour if any int up_neighbour_rank = (my_rank == FIRST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank - 1; // Rank of my down neighbour if any int down_neighbour_rank = (my_rank == LAST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank + 1; //report_placement(); //////////////////////////////////////////////////////////////////// // -- PREPARATION 2: INITIALISE TEMPERATURES ON MASTER PROCESS -- // //////////////////////////////////////////////////////////////////// /// Array that will contain my part chunk. It will include the 2 ghost rows (1 up, 1 down) double temperatures[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS]; /// Temperatures from the previous iteration, same dimensions as the array above. 
double temperatures_last[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS]; /// On master process only: contains all temperatures read from input file. double all_temperatures[ROWS][COLUMNS]; // The master MPI process will read a chunk from the file, send it to the corresponding MPI process and repeat until all chunks are read. if(my_rank == MASTER_PROCESS_RANK) { initialise_temperatures(all_temperatures); } MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////// // ^ // // / \ // // / | \ CODE FROM HERE IS TIMED // // / o \ // // /_______\ // /////////////////////////////////////////// //////////////////////////////////////////////////////// // -- TASK 1: DISTRIBUTE DATA TO ALL MPI PROCESSES -- // //////////////////////////////////////////////////////// double total_time_so_far = 0.0; double start_time = MPI_Wtime(); if(my_rank == MASTER_PROCESS_RANK) { for(int i = 0; i < comm_size; i++) { // Is the i'th chunk meant for me, the master MPI process? if(i != my_rank) { // No, so send the corresponding chunk to that MPI process. MPI_Ssend(&all_temperatures[i * ROWS_PER_MPI_PROCESS][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD); } else { // Yes, let's copy it straight for the array in which we read the file into. for(int j = 1; j <= ROWS_PER_MPI_PROCESS; j++) { for(int k = 0; k < COLUMNS_PER_MPI_PROCESS; k++) { temperatures_last[j][k] = all_temperatures[j-1][k]; } } } } } else { // Receive my chunk. 
MPI_Recv(&temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } // MPI_Scatter(all_temperatures, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, &temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); // Copy the temperatures into the current iteration temperature as well #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS) collapse(2) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++) { temperatures[i][j] = temperatures_last[i][j]; } } if(my_rank == MASTER_PROCESS_RANK) { printf("Data acquisition complete.\n"); } // Wait for everybody to receive their part before we can start processing MPI_Barrier(MPI_COMM_WORLD); ///////////////////////////// // TASK 2: DATA PROCESSING // ///////////////////////////// int iteration_count = 0; /// Maximum temperature change observed across all MPI processes double global_temperature_change; /// Maximum temperature change for us double my_temperature_change; /// The last snapshot made double snapshot[ROWS][COLUMNS]; while(total_time_so_far < MAX_TIME) { my_temperature_change = 0.0; // //////////////////////////////////////// // -- SUBTASK 1: EXCHANGE GHOST CELLS -- // // //////////////////////////////////////// // Send data to up neighbour for its ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing. MPI_Ssend(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD); // Receive data from down neighbour to fill our ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing. 
MPI_Recv(&temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // Send data to down neighbour for its ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing. MPI_Ssend(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD); // Receive data from up neighbour to fill our ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing. MPI_Recv(&temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // ///////////// Using SendRecv // // Send data to up neighbour from down neighbour // MPI_Sendrecv(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, &temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // // Send data to down neighbour from up neighbour // MPI_Sendrecv(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, &temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); ///////////////////////////////////////////// // -- SUBTASK 2: PROPAGATE TEMPERATURES -- // ///////////////////////////////////////////// #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) collapse(2) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process all cells between the first and last columns excluded, which each has both left and right neighbours for(int j = 1; j < COLUMNS_PER_MPI_PROCESS - 1; j++) { if(temperatures[i][j] != MAX_TEMPERATURE) { temperatures[i][j] = 0.25 * (temperatures_last[i-1][j ] + temperatures_last[i+1][j ] + temperatures_last[i ][j-1] + temperatures_last[i ][j+1]); } } } #pragma 
omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process the cell at the first column, which has no left neighbour if(temperatures[i][0] != MAX_TEMPERATURE) { temperatures[i][0] = (temperatures_last[i-1][0] + temperatures_last[i+1][0] + temperatures_last[i ][1]) / 3.0; } } #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process the cell at the last column, which has no right neighbour if(temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] != MAX_TEMPERATURE) { temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] = (temperatures_last[i-1][COLUMNS_PER_MPI_PROCESS - 1] + temperatures_last[i+1][COLUMNS_PER_MPI_PROCESS - 1] + temperatures_last[i ][COLUMNS_PER_MPI_PROCESS - 2]) / 3.0; } } // Start the gather of the snapshot here MPI_Request gather_request; if(iteration_count % SNAPSHOT_INTERVAL == 0) { MPI_Igather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD, &gather_request); } /////////////////////////////////////////////////////// // -- SUBTASK 3: CALCULATE MAX TEMPERATURE CHANGE -- // /////////////////////////////////////////////////////// my_temperature_change = 0.0; #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS) collapse(2) reduciton(max:my_temperature_change) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++) { my_temperature_change = fmax(fabs(temperatures[i][j] - temperatures_last[i][j]), my_temperature_change); temperatures_last[i][j] = temperatures[i][j]; } } ////////////////////////////////////////////////////////// // -- SUBTASK 4: FIND MAX TEMPERATURE CHANGE OVERALL -- // 
////////////////////////////////////////////////////////// MPI_Request allreduce_request; MPI_Iallreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD, &allreduce_request); // MPI_Allreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); // Wait for the all reduce to find the max temp to complete MPI_Wait(&allreduce_request, MPI_STATUS_IGNORE); /////////////////////////////////// // -- SUBTASK 6: GET SNAPSHOT -- // /////////////////////////////////// if(iteration_count % SNAPSHOT_INTERVAL == 0) { if(my_rank == MASTER_PROCESS_RANK) { // Wait there to gather the snapshot MPI_Wait(&gather_request, MPI_STATUS_IGNORE); printf("Iteration %d: %.18f\n", iteration_count, global_temperature_change); } // MPI_Gather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); } // Calculate the total time spent processing if(my_rank == MASTER_PROCESS_RANK) { total_time_so_far = MPI_Wtime() - start_time; } // Send total timer to everybody so they too can exit the loop if more than the allowed runtime has elapsed already MPI_Bcast(&total_time_so_far, 1, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); // Update the iteration number iteration_count++; } /////////////////////////////////////////////// // ^ // // / \ // // / | \ CODE FROM HERE IS NOT TIMED // // / o \ // // /_______\ // /////////////////////////////////////////////// ///////////////////////////////////////// // -- FINALISATION 2: PRINT SUMMARY -- // ///////////////////////////////////////// if(my_rank == MASTER_PROCESS_RANK) { printf("The program took %.2f seconds in total and executed %d iterations.\n", total_time_so_far, iteration_count); } MPI_Finalize(); return EXIT_SUCCESS; }
/**
 * @file main.c
 * @brief This file contains the source code of the application to parallelise.
 * @details This application is a classic heat spread simulation. This is the
 *          serial (MPI-only, no OpenMP pragmas) variant of the program.
 * @author Ludovic Capelli
 **/

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
#include <inttypes.h>
#include <math.h>
#include <sched.h>
#include <unistd.h>
#include <string.h>
#include "util.h"

/**
 * @argv[0] Name of the program
 * @argv[1] path to the dataset to load
 **/
int main(int argc, char* argv[])
{
    (void)argc;
    (void)argv;

    MPI_Init(NULL, NULL);

    /////////////////////////////////////////////////////
    // -- PREPARATION 1: COLLECT USEFUL INFORMATION -- //
    /////////////////////////////////////////////////////

    // Ranks for convenience so that we don't throw raw values all over the code
    const int MASTER_PROCESS_RANK = 0;

    // The rank of the MPI process in charge of this instance
    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    // Number of MPI processes in total, commonly called "comm_size" for "communicator size".
    int comm_size;
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);

    /// Rank of the first MPI process
    const int FIRST_PROCESS_RANK = 0;
    /// Rank of the last MPI process
    const int LAST_PROCESS_RANK = comm_size - 1;

    // Rank of my up neighbour if any; MPI_PROC_NULL makes the matching
    // send/recv calls no-ops at the top/bottom edges of the grid.
    int up_neighbour_rank = (my_rank == FIRST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank - 1;

    // Rank of my down neighbour if any
    int down_neighbour_rank = (my_rank == LAST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank + 1;

    //report_placement();

    ////////////////////////////////////////////////////////////////////
    // -- PREPARATION 2: INITIALISE TEMPERATURES ON MASTER PROCESS -- //
    ////////////////////////////////////////////////////////////////////

    /// Array that will contain my part chunk. It will include the 2 ghost rows (1 up, 1 down)
    double temperatures[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS];
    /// Temperatures from the previous iteration, same dimensions as the array above.
    double temperatures_last[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS];
    /// On master process only: contains all temperatures read from input file.
    double all_temperatures[ROWS][COLUMNS];

    // The master MPI process will read a chunk from the file, send it to the corresponding MPI process and repeat until all chunks are read.
    if(my_rank == MASTER_PROCESS_RANK)
    {
        initialise_temperatures(all_temperatures);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    ///////////////////////////////////////////
    //     ^                                 //
    //    / \                                //
    //   / | \    CODE FROM HERE IS TIMED    //
    //  /  o  \                              //
    // /_______\                             //
    ///////////////////////////////////////////

    ////////////////////////////////////////////////////////
    // -- TASK 1: DISTRIBUTE DATA TO ALL MPI PROCESSES -- //
    ////////////////////////////////////////////////////////

    double total_time_so_far = 0.0;
    double start_time = MPI_Wtime();

    if(my_rank == MASTER_PROCESS_RANK)
    {
        for(int i = 0; i < comm_size; i++)
        {
            // Is the i'th chunk meant for me, the master MPI process?
            if(i != my_rank)
            {
                // No, so send the corresponding chunk to that MPI process.
                MPI_Ssend(&all_temperatures[i * ROWS_PER_MPI_PROCESS][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
            }
            else
            {
                // Yes, let's copy it straight for the array in which we read the file into.
                // (Correct for rank 0 only: its chunk starts at global row 0.)
                for(int j = 1; j <= ROWS_PER_MPI_PROCESS; j++)
                {
                    for(int k = 0; k < COLUMNS_PER_MPI_PROCESS; k++)
                    {
                        temperatures_last[j][k] = all_temperatures[j-1][k];
                    }
                }
            }
        }
    }
    else
    {
        // Receive my chunk.
        MPI_Recv(&temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    // MPI_Scatter(all_temperatures, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, &temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);

    // Copy the temperatures into the current iteration temperature as well
    for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
    {
        for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++)
        {
            temperatures[i][j] = temperatures_last[i][j];
        }
    }

    if(my_rank == MASTER_PROCESS_RANK)
    {
        printf("Data acquisition complete.\n");
    }

    // Wait for everybody to receive their part before we can start processing
    MPI_Barrier(MPI_COMM_WORLD);

    /////////////////////////////
    // TASK 2: DATA PROCESSING //
    /////////////////////////////
    int iteration_count = 0;
    /// Maximum temperature change observed across all MPI processes
    double global_temperature_change;
    /// Maximum temperature change for us
    double my_temperature_change;
    /// The last snapshot made
    double snapshot[ROWS][COLUMNS];

    while(total_time_so_far < MAX_TIME)
    {
        // NOTE(review): redundant — my_temperature_change is reset again just
        // before subtask 3 below.
        my_temperature_change = 0.0;

        // //////////////////////////////////////// //
        // -- SUBTASK 1: EXCHANGE GHOST CELLS --    //
        // //////////////////////////////////////// //
        // NOTE(review): the ordering of these four calls is what prevents
        // deadlock (rank 0's Ssend to MPI_PROC_NULL returns immediately and
        // unblocks the chain) — do not reorder.

        // Send data to up neighbour for its ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing.
        MPI_Ssend(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD);

        // Receive data from down neighbour to fill our ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing.
        MPI_Recv(&temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        // Send data to down neighbour for its ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing.
        MPI_Ssend(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD);

        // Receive data from up neighbour to fill our ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing.
        MPI_Recv(&temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        // ///////////// Using SendRecv
        // // Send data to up neighbour from down neighbour
        // MPI_Sendrecv(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, &temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        // // Send data to down neighbour from up neighbour
        // MPI_Sendrecv(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, &temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        /////////////////////////////////////////////
        // -- SUBTASK 2: PROPAGATE TEMPERATURES -- //
        /////////////////////////////////////////////
        for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
        {
            // Process all cells between the first and last columns excluded, which each has both left and right neighbours
            for(int j = 1; j < COLUMNS_PER_MPI_PROCESS - 1; j++)
            {
                if(temperatures[i][j] != MAX_TEMPERATURE)
                {
                    temperatures[i][j] = 0.25 * (temperatures_last[i-1][j  ] +
                                                 temperatures_last[i+1][j  ] +
                                                 temperatures_last[i  ][j-1] +
                                                 temperatures_last[i  ][j+1]);
                }
            }
        }
        for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
        {
            // Process the cell at the first column, which has no left neighbour
            if(temperatures[i][0] != MAX_TEMPERATURE)
            {
                temperatures[i][0] = (temperatures_last[i-1][0] +
                                      temperatures_last[i+1][0] +
                                      temperatures_last[i  ][1]) / 3.0;
            }
        }
        for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
        {
            // Process the cell at the last column, which has no right neighbour
            if(temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] != MAX_TEMPERATURE)
            {
                temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] = (temperatures_last[i-1][COLUMNS_PER_MPI_PROCESS - 1] +
                                                                temperatures_last[i+1][COLUMNS_PER_MPI_PROCESS - 1] +
                                                                temperatures_last[i  ][COLUMNS_PER_MPI_PROCESS - 2]) / 3.0;
            }
        }

        // Start the gather of the snapshot here
        MPI_Request gather_request;
        if(iteration_count % SNAPSHOT_INTERVAL == 0)
        {
            MPI_Igather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD, &gather_request);
        }

        ///////////////////////////////////////////////////////
        // -- SUBTASK 3: CALCULATE MAX TEMPERATURE CHANGE -- //
        ///////////////////////////////////////////////////////
        my_temperature_change = 0.0;
        for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++)
        {
            for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++)
            {
                my_temperature_change = fmax(fabs(temperatures[i][j] - temperatures_last[i][j]), my_temperature_change);
                temperatures_last[i][j] = temperatures[i][j];
            }
        }

        //////////////////////////////////////////////////////////
        // -- SUBTASK 4: FIND MAX TEMPERATURE CHANGE OVERALL -- //
        //////////////////////////////////////////////////////////
        MPI_Request allreduce_request;
        MPI_Iallreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD, &allreduce_request);
        // MPI_Allreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);

        // Wait for the all reduce to find the max temp to complete
        MPI_Wait(&allreduce_request, MPI_STATUS_IGNORE);

        ///////////////////////////////////
        // -- SUBTASK 6: GET SNAPSHOT -- //
        ///////////////////////////////////
        if(iteration_count % SNAPSHOT_INTERVAL == 0)
        {
            if(my_rank == MASTER_PROCESS_RANK)
            {
                // Wait there to gather the snapshot
                // NOTE(review): only the root completes the MPI_Igather; the
                // other ranks never MPI_Wait on gather_request, leaking the
                // request and reusing the send buffer while the collective may
                // still be in flight — every participant should complete it.
                MPI_Wait(&gather_request, MPI_STATUS_IGNORE);
                printf("Iteration %d: %.18f\n", iteration_count, global_temperature_change);
            }
            // MPI_Gather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);
        }

        // Calculate the total time spent processing
        if(my_rank == MASTER_PROCESS_RANK)
        {
            total_time_so_far = MPI_Wtime() - start_time;
        }

        // Send total timer to everybody so they too can exit the loop if more than the allowed runtime has elapsed already
        MPI_Bcast(&total_time_so_far, 1, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD);

        // Update the iteration number
        iteration_count++;
    }

    ///////////////////////////////////////////////
    //     ^                                     //
    //    / \                                    //
    //   / | \    CODE FROM HERE IS NOT TIMED    //
    //  /  o  \                                  //
    // /_______\                                 //
    ///////////////////////////////////////////////

    /////////////////////////////////////////
    // -- FINALISATION 2: PRINT SUMMARY -- //
    /////////////////////////////////////////
    if(my_rank == MASTER_PROCESS_RANK)
    {
        printf("The program took %.2f seconds in total and executed %d iterations.\n", total_time_so_far, iteration_count);
    }

    MPI_Finalize();

    return EXIT_SUCCESS;
}
/** * @file main.c * @brief This file contains the source code of the application to parallelise. * @details This application is a classic heat spread simulation. * @author Ludovic Capelli **/ #include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #include <inttypes.h> #include <math.h> #include <sched.h> #include <unistd.h> #include <string.h> #include "util.h" /** * @argv[0] Name of the program * @argv[1] path to the dataset to load **/ int main(int argc, char* argv[]) { (void)argc; (void)argv; MPI_Init(NULL, NULL); ///////////////////////////////////////////////////// // -- PREPARATION 1: COLLECT USEFUL INFORMATION -- // ///////////////////////////////////////////////////// // Ranks for convenience so that we don't throw raw values all over the code const int MASTER_PROCESS_RANK = 0; // The rank of the MPI process in charge of this instance int my_rank; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Number of MPI processes in total, commonly called "comm_size" for "communicator size". int comm_size; MPI_Comm_size(MPI_COMM_WORLD, &comm_size); /// Rank of the first MPI process const int FIRST_PROCESS_RANK = 0; /// Rank of the last MPI process const int LAST_PROCESS_RANK = comm_size - 1; // Rank of my up neighbour if any int up_neighbour_rank = (my_rank == FIRST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank - 1; // Rank of my down neighbour if any int down_neighbour_rank = (my_rank == LAST_PROCESS_RANK) ? MPI_PROC_NULL : my_rank + 1; //report_placement(); //////////////////////////////////////////////////////////////////// // -- PREPARATION 2: INITIALISE TEMPERATURES ON MASTER PROCESS -- // //////////////////////////////////////////////////////////////////// /// Array that will contain my part chunk. It will include the 2 ghost rows (1 up, 1 down) double temperatures[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS]; /// Temperatures from the previous iteration, same dimensions as the array above. 
double temperatures_last[ROWS_PER_MPI_PROCESS+2][COLUMNS_PER_MPI_PROCESS]; /// On master process only: contains all temperatures read from input file. double all_temperatures[ROWS][COLUMNS]; // The master MPI process will read a chunk from the file, send it to the corresponding MPI process and repeat until all chunks are read. if(my_rank == MASTER_PROCESS_RANK) { initialise_temperatures(all_temperatures); } MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////// // ^ // // / \ // // / | \ CODE FROM HERE IS TIMED // // / o \ // // /_______\ // /////////////////////////////////////////// //////////////////////////////////////////////////////// // -- TASK 1: DISTRIBUTE DATA TO ALL MPI PROCESSES -- // //////////////////////////////////////////////////////// double total_time_so_far = 0.0; double start_time = MPI_Wtime(); if(my_rank == MASTER_PROCESS_RANK) { for(int i = 0; i < comm_size; i++) { // Is the i'th chunk meant for me, the master MPI process? if(i != my_rank) { // No, so send the corresponding chunk to that MPI process. MPI_Ssend(&all_temperatures[i * ROWS_PER_MPI_PROCESS][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, i, 0, MPI_COMM_WORLD); } else { // Yes, let's copy it straight for the array in which we read the file into. for(int j = 1; j <= ROWS_PER_MPI_PROCESS; j++) { for(int k = 0; k < COLUMNS_PER_MPI_PROCESS; k++) { temperatures_last[j][k] = all_temperatures[j-1][k]; } } } } } else { // Receive my chunk. 
MPI_Recv(&temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } // MPI_Scatter(all_temperatures, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, &temperatures_last[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); // Copy the temperatures into the current iteration temperature as well #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS) collapse(2) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++) { temperatures[i][j] = temperatures_last[i][j]; } } if(my_rank == MASTER_PROCESS_RANK) { printf("Data acquisition complete.\n"); } // Wait for everybody to receive their part before we can start processing MPI_Barrier(MPI_COMM_WORLD); ///////////////////////////// // TASK 2: DATA PROCESSING // ///////////////////////////// int iteration_count = 0; /// Maximum temperature change observed across all MPI processes double global_temperature_change; /// Maximum temperature change for us double my_temperature_change; /// The last snapshot made double snapshot[ROWS][COLUMNS]; while(total_time_so_far < MAX_TIME) { my_temperature_change = 0.0; // //////////////////////////////////////// // -- SUBTASK 1: EXCHANGE GHOST CELLS -- // // //////////////////////////////////////// // Send data to up neighbour for its ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing. MPI_Ssend(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD); // Receive data from down neighbour to fill our ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing. 
MPI_Recv(&temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // Send data to down neighbour for its ghost cells. If my down_neighbour_rank is MPI_PROC_NULL, this MPI_Ssend will do nothing. MPI_Ssend(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD); // Receive data from up neighbour to fill our ghost cells. If my up_neighbour_rank is MPI_PROC_NULL, this MPI_Recv will do nothing. MPI_Recv(&temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // ///////////// Using SendRecv // // Send data to up neighbour from down neighbour // MPI_Sendrecv(&temperatures[1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, &temperatures_last[ROWS_PER_MPI_PROCESS+1][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // // Send data to down neighbour from up neighbour // MPI_Sendrecv(&temperatures[ROWS_PER_MPI_PROCESS][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, down_neighbour_rank, 0, &temperatures_last[0][0], COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, up_neighbour_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); ///////////////////////////////////////////// // -- SUBTASK 2: PROPAGATE TEMPERATURES -- // ///////////////////////////////////////////// #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) collapse(2) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process all cells between the first and last columns excluded, which each has both left and right neighbours for(int j = 1; j < COLUMNS_PER_MPI_PROCESS - 1; j++) { if(temperatures[i][j] != MAX_TEMPERATURE) { temperatures[i][j] = 0.25 * (temperatures_last[i-1][j ] + temperatures_last[i+1][j ] + temperatures_last[i ][j-1] + temperatures_last[i ][j+1]); } } } #pragma 
omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process the cell at the first column, which has no left neighbour if(temperatures[i][0] != MAX_TEMPERATURE) { temperatures[i][0] = (temperatures_last[i-1][0] + temperatures_last[i+1][0] + temperatures_last[i ][1]) / 3.0; } } #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS, MAX_TEMPERATURE) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { // Process the cell at the last column, which has no right neighbour if(temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] != MAX_TEMPERATURE) { temperatures[i][COLUMNS_PER_MPI_PROCESS - 1] = (temperatures_last[i-1][COLUMNS_PER_MPI_PROCESS - 1] + temperatures_last[i+1][COLUMNS_PER_MPI_PROCESS - 1] + temperatures_last[i ][COLUMNS_PER_MPI_PROCESS - 2]) / 3.0; } } // Start the gather of the snapshot here MPI_Request gather_request; if(iteration_count % SNAPSHOT_INTERVAL == 0) { MPI_Igather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD, &gather_request); } /////////////////////////////////////////////////////// // -- SUBTASK 3: CALCULATE MAX TEMPERATURE CHANGE -- // /////////////////////////////////////////////////////// my_temperature_change = 0.0; #pragma omp parallel for shared(temperatures, temperatures_last, ROWS_PER_MPI_PROCESS, ROWS_PER_MPI_PROCESS) collapse(2) reduciton(max:my_temperature_change) for(int i = 1; i <= ROWS_PER_MPI_PROCESS; i++) { for(int j = 0; j < COLUMNS_PER_MPI_PROCESS; j++) { my_temperature_change = fmax(fabs(temperatures[i][j] - temperatures_last[i][j]), my_temperature_change); temperatures_last[i][j] = temperatures[i][j]; } } ////////////////////////////////////////////////////////// // -- SUBTASK 4: FIND MAX TEMPERATURE CHANGE OVERALL -- // 
////////////////////////////////////////////////////////// MPI_Request allreduce_request; MPI_Iallreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD, &allreduce_request); // MPI_Allreduce(&my_temperature_change, &global_temperature_change, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); // Wait for the all reduce to find the max temp to complete MPI_Wait(&allreduce_request, MPI_STATUS_IGNORE); /////////////////////////////////// // -- SUBTASK 6: GET SNAPSHOT -- // /////////////////////////////////// if(iteration_count % SNAPSHOT_INTERVAL == 0) { if(my_rank == MASTER_PROCESS_RANK) { // Wait there to gather the snapshot MPI_Wait(&gather_request, MPI_STATUS_IGNORE); printf("Iteration %d: %.18f\n", iteration_count, global_temperature_change); } // MPI_Gather(&temperatures[1][0], ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, snapshot, ROWS_PER_MPI_PROCESS * COLUMNS_PER_MPI_PROCESS, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); } // Calculate the total time spent processing if(my_rank == MASTER_PROCESS_RANK) { total_time_so_far = MPI_Wtime() - start_time; } // Send total timer to everybody so they too can exit the loop if more than the allowed runtime has elapsed already MPI_Bcast(&total_time_so_far, 1, MPI_DOUBLE, MASTER_PROCESS_RANK, MPI_COMM_WORLD); // Update the iteration number iteration_count++; } /////////////////////////////////////////////// // ^ // // / \ // // / | \ CODE FROM HERE IS NOT TIMED // // / o \ // // /_______\ // /////////////////////////////////////////////// ///////////////////////////////////////// // -- FINALISATION 2: PRINT SUMMARY -- // ///////////////////////////////////////// if(my_rank == MASTER_PROCESS_RANK) { printf("The program took %.2f seconds in total and executed %d iterations.\n", total_time_so_far, iteration_count); } MPI_Finalize(); return EXIT_SUCCESS; }
query-veb.h
/* * Copyright 2018-2021 Kyle Berney, Ben Karsin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef QUERY_VEB_H #define QUERY_VEB_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <omp.h> #include <time.h> #include "params.h" #include "common.h" struct vEB_table { uint64_t L; //size of the bottom/leaf tree uint64_t R; //size of the corresponding top/root tree uint32_t D; //depth of the root of the corresponding top/root tree }; void buildTable(vEB_table *table, uint64_t n, uint32_t d, uint32_t root_depth); //Searches given perfect vEB tree layout for the query'd element using the table composed of L, R, and D //Assumes L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) template<typename TYPE> uint64_t searchvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d = 0; uint64_t i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; uint64_t index = 0; //0-indexed position of the current node in the vEB tree while (index < n) { current = A[index]; if (query == current) { return index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[table[current_d].D] + table[current_d].R 
+ (i & table[current_d].R) * table[current_d].L; index = pos[current_d] - 1; } return n; } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { #pragma omp parallel shared(A, table, n, d, queries, answers, numQueries, p) num_threads(p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = searchvEB<TYPE>(A, table, n, d, queries[i], pos, tid); } } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll<TYPE>(A, table, n, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. 
+ start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } //Searches given non-perfect vEB layout for the query'd element using the tables composed of L, R, and D //tables is an array of size num_tables pointing to each vEB table used for querying //E.g., tables[0] is used to query the full root and leaf subtrees //and tables[1] is used to query the first incomplete leaf subtree //Each subsequent entry in queries is used to query the next recursive incomplete leaf subtree //idx is and array of size num_tables which gives the index of where the next incomplete leaf subtree starts //Assumes all tables L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) template<typename TYPE> uint64_t searchvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d; uint64_t i, index; TYPE *a = A; uint64_t offset = 0; uint32_t j = 0; while (j < num_tables) { current_d = 0; i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; index = 0; //0-indexed position of the current node in the vEB tree while (index + offset < idx[j]) { current = a[index]; if (query == current) { return offset + index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[tables[j][current_d].D] + tables[j][current_d].R + (i & 
tables[j][current_d].R) * tables[j][current_d].L; index = pos[current_d] - 1; } a = &A[idx[j]]; offset = idx[j]; ++j; } return idx[num_tables-1]; //return n } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { #pragma omp parallel shared(A, tables, idx, num_tables, d, queries, answers, numQueries, p) num_threads(p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = searchvEB_nonperfect<TYPE>(A, tables, idx, num_tables, queries[i], pos, tid); } } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll_nonperfect<TYPE>(A, tables, idx, num_tables, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. 
+ start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } #endif
#ifndef QUERY_VEB_H #define QUERY_VEB_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <omp.h> #include <time.h> #include "params.h" #include "common.h" struct vEB_table { uint64_t L; //size of the bottom/leaf tree uint64_t R; //size of the corresponding top/root tree uint32_t D; //depth of the root of the corresponding top/root tree }; void buildTable(vEB_table *table, uint64_t n, uint32_t d, uint32_t root_depth); //Searches given perfect vEB tree layout for the query'd element using the table composed of L, R, and D //Assumes L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) template<typename TYPE> uint64_t searchvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d = 0; uint64_t i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; uint64_t index = 0; //0-indexed position of the current node in the vEB tree while (index < n) { current = A[index]; if (query == current) { return index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[table[current_d].D] + table[current_d].R + (i & table[current_d].R) * table[current_d].L; index = pos[current_d] - 1; } return n; } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = searchvEB<TYPE>(A, table, n, d, queries[i], pos, tid); } } //Generates numQueries random queries and 
returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll<TYPE>(A, table, n, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } //Searches given non-perfect vEB layout for the query'd element using the tables composed of L, R, and D //tables is an array of size num_tables pointing to each vEB table used for querying //E.g., tables[0] is used to query the full root and leaf subtrees //and tables[1] is used to query the first incomplete leaf subtree //Each subsequent entry in queries is used to query the next recursive incomplete leaf subtree //idx is and array of size num_tables which gives the index of where the next incomplete leaf subtree starts //Assumes all tables L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) 
template<typename TYPE> uint64_t searchvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d; uint64_t i, index; TYPE *a = A; uint64_t offset = 0; uint32_t j = 0; while (j < num_tables) { current_d = 0; i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; index = 0; //0-indexed position of the current node in the vEB tree while (index + offset < idx[j]) { current = a[index]; if (query == current) { return offset + index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[tables[j][current_d].D] + tables[j][current_d].R + (i & tables[j][current_d].R) * tables[j][current_d].L; index = pos[current_d] - 1; } a = &A[idx[j]]; offset = idx[j]; ++j; } return idx[num_tables-1]; //return n } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = searchvEB_nonperfect<TYPE>(A, tables, idx, num_tables, queries[i], pos, tid); } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); 
searchAll_nonperfect<TYPE>(A, tables, idx, num_tables, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } #endif
#ifndef QUERY_VEB_H #define QUERY_VEB_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <omp.h> #include <time.h> #include "params.h" #include "common.h" struct vEB_table { uint64_t L; //size of the bottom/leaf tree uint64_t R; //size of the corresponding top/root tree uint32_t D; //depth of the root of the corresponding top/root tree }; void buildTable(vEB_table *table, uint64_t n, uint32_t d, uint32_t root_depth); //Searches given perfect vEB tree layout for the query'd element using the table composed of L, R, and D //Assumes L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) template<typename TYPE> uint64_t searchvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d = 0; uint64_t i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; uint64_t index = 0; //0-indexed position of the current node in the vEB tree while (index < n) { current = A[index]; if (query == current) { return index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[table[current_d].D] + table[current_d].R + (i & table[current_d].R) * table[current_d].L; index = pos[current_d] - 1; } return n; } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { #pragma omp parallel shared(A, table, n, d, queries, answers, numQueries, p) num_threads(p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = 
searchvEB<TYPE>(A, table, n, d, queries[i], pos, tid); } } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB(TYPE *A, vEB_table *table, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll<TYPE>(A, table, n, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } //Searches given non-perfect vEB layout for the query'd element using the tables composed of L, R, and D //tables is an array of size num_tables pointing to each vEB table used for querying //E.g., tables[0] is used to query the full root and leaf subtrees //and tables[1] is used to query the first incomplete leaf subtree //Each subsequent entry in queries is used to query the next recursive incomplete leaf subtree //idx is and array of size num_tables which gives the index of where the next incomplete leaf subtree starts //Assumes all tables L, R, and D have been initialized via buildTable() //pos is an array of size d which is used to store the 1-indexed position of the node visited during the query at current depth in the 
vEB tree //Returns index of query'd element (if found) //Otherwise, returns n (element not found) template<typename TYPE> uint64_t searchvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, TYPE query, uint64_t *pos, uint64_t tid) { TYPE current; uint32_t current_d; uint64_t i, index; TYPE *a = A; uint64_t offset = 0; uint32_t j = 0; while (j < num_tables) { current_d = 0; i = 1; //1-indexed position of the current node in a BFS (i.e, level order) binary search tree pos[0] = 1; index = 0; //0-indexed position of the current node in the vEB tree while (index + offset < idx[j]) { current = a[index]; if (query == current) { return offset + index; } i = 2*i + (query > current); current_d++; pos[current_d] = pos[tables[j][current_d].D] + tables[j][current_d].R + (i & tables[j][current_d].R) * tables[j][current_d].L; index = pos[current_d] - 1; } a = &A[idx[j]]; offset = idx[j]; ++j; } return idx[num_tables-1]; //return n } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint32_t d, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { #pragma omp parallel shared(A, tables, idx, num_tables, d, queries, answers, numQueries, p) num_threads(p) { uint64_t pos[d]; uint64_t tid = omp_get_thread_num(); for (uint64_t i = tid; i < numQueries; i += p) { answers[i] = searchvEB_nonperfect<TYPE>(A, tables, idx, num_tables, queries[i], pos, tid); } } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries on the given vEB tree layout template<typename TYPE> double timeQueryvEB_nonperfect(TYPE *A, vEB_table **tables, uint64_t *idx, uint32_t num_tables, uint64_t n, uint32_t d, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store 
random queries to perform uint64_t *answers = (uint64_t *)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll_nonperfect<TYPE>(A, tables, idx, num_tables, d, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i] || answers[i] == n) { #ifdef DEBUG printf("query = %lu; A[%lu] = %lu\n", queries[i], answers[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } #endif
omp_for_dynamic_large_chunk.c
// RUN: %libomp-compile // RUN: env OMP_WAIT_POLICY=passive OMP_NUM_THREADS=32 %libomp-run 0 134217728 1 134217728 // // This test makes sure that large chunks sizes are handled correctly // including internal runtime calculations which incorporate the chunk size // Only one thread should execute all iterations. #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" typedef unsigned long long ull_t; int main(int argc, char **argv) { int i, j, lb, ub, stride, nthreads, actual_nthreads, chunk; ull_t num_iters = 0; ull_t counted_iters = 0; int errs = 0; if (argc != 5) { fprintf(stderr, "error: incorrect number of arguments\n"); fprintf(stderr, "usage: %s <lb> <ub> <stride> <chunk>\n", argv[0]); exit(EXIT_FAILURE); } lb = atoi(argv[1]); ub = atoi(argv[2]); stride = atoi(argv[3]); chunk = atoi(argv[4]); nthreads = omp_get_max_threads(); if (lb >= ub) { fprintf(stderr, "error: lb must be less than ub\n"); exit(EXIT_FAILURE); } if (stride <= 0) { fprintf(stderr, "error: stride must be positive integer\n"); exit(EXIT_FAILURE); } if (chunk <= 0) { fprintf(stderr, "error: chunk must be positive integer\n"); exit(EXIT_FAILURE); } for (i = lb; i < ub; i += stride) num_iters++; #pragma omp parallel num_threads(nthreads) { #pragma omp single actual_nthreads = omp_get_num_threads(); if (actual_nthreads != nthreads) { printf("did not create enough threads, skipping test.\n"); } else { #pragma omp for schedule(dynamic, chunk) for (i = lb; i < ub; i += stride) { counted_iters++; } } } // Check that the number of iterations executed is correct if (actual_nthreads == nthreads && counted_iters != num_iters) { fprintf(stderr, "error: wrong number of final iterations counted! " "num_iters=%llu, counted_iters=%llu\n", num_iters, counted_iters); exit(EXIT_FAILURE); } return EXIT_SUCCESS; }
// RUN:%libomp - compile // RUN:env OMP_WAIT_POLICY = passive OMP_NUM_THREADS = 32 % libomp - run 0 134217728 1 134217728 // //This test makes sure that large chunks sizes are handled correctly // including internal runtime calculations which incorporate the chunk size // Only one thread should execute all iterations. #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" typedef unsigned long long ull_t; int main(int argc, char **argv) { int i, j, lb, ub, stride, nthreads, actual_nthreads, chunk; ull_t num_iters = 0; ull_t counted_iters = 0; int errs = 0; if (argc != 5) { fprintf(stderr, "error: incorrect number of arguments\n"); fprintf(stderr, "usage: %s <lb> <ub> <stride> <chunk>\n", argv[0]); exit(EXIT_FAILURE); } lb = atoi(argv[1]); ub = atoi(argv[2]); stride = atoi(argv[3]); chunk = atoi(argv[4]); nthreads = omp_get_max_threads(); if (lb >= ub) { fprintf(stderr, "error: lb must be less than ub\n"); exit(EXIT_FAILURE); } if (stride <= 0) { fprintf(stderr, "error: stride must be positive integer\n"); exit(EXIT_FAILURE); } if (chunk <= 0) { fprintf(stderr, "error: chunk must be positive integer\n"); exit(EXIT_FAILURE); } for (i = lb; i < ub; i += stride) num_iters++; actual_nthreads = omp_get_num_threads(); if (actual_nthreads != nthreads) { printf("did not create enough threads, skipping test.\n"); } else { for (i = lb; i < ub; i += stride) { counted_iters++; } } //Check that the number of iterations executed is correct if (actual_nthreads == nthreads && counted_iters != num_iters) { fprintf(stderr, "error: wrong number of final iterations counted! " "num_iters=%llu, counted_iters=%llu\n", num_iters, counted_iters); exit(EXIT_FAILURE); } return EXIT_SUCCESS; }
// RUN:%libomp - compile // RUN:env OMP_WAIT_POLICY = passive OMP_NUM_THREADS = 32 % libomp - run 0 134217728 1 134217728 // //This test makes sure that large chunks sizes are handled correctly // including internal runtime calculations which incorporate the chunk size // Only one thread should execute all iterations. #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" typedef unsigned long long ull_t; int main(int argc, char **argv) { int i, j, lb, ub, stride, nthreads, actual_nthreads, chunk; ull_t num_iters = 0; ull_t counted_iters = 0; int errs = 0; if (argc != 5) { fprintf(stderr, "error: incorrect number of arguments\n"); fprintf(stderr, "usage: %s <lb> <ub> <stride> <chunk>\n", argv[0]); exit(EXIT_FAILURE); } lb = atoi(argv[1]); ub = atoi(argv[2]); stride = atoi(argv[3]); chunk = atoi(argv[4]); nthreads = omp_get_max_threads(); if (lb >= ub) { fprintf(stderr, "error: lb must be less than ub\n"); exit(EXIT_FAILURE); } if (stride <= 0) { fprintf(stderr, "error: stride must be positive integer\n"); exit(EXIT_FAILURE); } if (chunk <= 0) { fprintf(stderr, "error: chunk must be positive integer\n"); exit(EXIT_FAILURE); } for (i = lb; i < ub; i += stride) num_iters++; #pragma omp parallel num_threads(nthreads) { #pragma omp single actual_nthreads = omp_get_num_threads(); if (actual_nthreads != nthreads) { printf("did not create enough threads, skipping test.\n"); } else { #pragma omp for schedule(dynamic, chunk) for (i = lb; i < ub; i += stride) { counted_iters++; } } } //Check that the number of iterations executed is correct if (actual_nthreads == nthreads && counted_iters != num_iters) { fprintf(stderr, "error: wrong number of final iterations counted! " "num_iters=%llu, counted_iters=%llu\n", num_iters, counted_iters); exit(EXIT_FAILURE); } return EXIT_SUCCESS; }
Contractor.h
/*
open source routing machine
Copyright (C) Dennis Luxen, others 2010

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
or see http://www.gnu.org/licenses/agpl.txt.
*/

#ifndef CONTRACTOR_H_INCLUDED
#define CONTRACTOR_H_INCLUDED

#ifdef _GLIBCXX_PARALLEL
#include <parallel/algorithm>
#else
#include <algorithm>
#endif
#include "../DataStructures/DynamicGraph.h"
#include "../DataStructures/Percent.h"
#include "../DataStructures/BinaryHeap.h"
#include <ctime>
#include <vector>
#include <queue>
#include <set>
#include <stack>
#include <limits>

#ifdef _OPENMP
#include <omp.h>
#else
// Serial fallbacks so the OpenMP-parallel code below compiles without OpenMP.
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif

// Builds a contraction hierarchy (CH): nodes are contracted in priority order,
// inserting shortcut edges that preserve shortest-path distances. Work is
// parallelized with OpenMP over independent node sets.
class Contractor {

private:
    // A shortcut edge stores the contracted middle node; an original edge
    // stores its name id. Both share the same storage slot.
    union _MiddleName {
        NodeID middle;
        NodeID nameID;
    };

    // Per-edge payload kept in the dynamic graph during contraction.
    struct _EdgeData {
        unsigned distance;            // edge weight
        unsigned originalEdges : 29;  // number of original edges this (shortcut) edge represents
        bool shortcut : 1;
        bool forward : 1;
        bool backward : 1;
        //short type:6; bool forwardTurn:1; bool backwardTurn:1;
        // NOTE(review): the newline structure of the line above was lost; it is
        // unclear whether forwardTurn/backwardTurn were active bit-fields or
        // commented out. Neither field is referenced anywhere in this file.
        _MiddleName middleName;
    } data;

    // Heap payload: marks whether a node is a target of the local search.
    struct _HeapData {
        bool target;
        _HeapData() : target(false) {}
        _HeapData( bool t ) : target(t) {}
    };

    typedef DynamicGraph< _EdgeData > _DynamicGraph;
    typedef BinaryHeap< NodeID, NodeID, int, _HeapData> _Heap;
    typedef _DynamicGraph::InputEdge _ImportEdge;

    // Per-thread scratch state: a heap for local Dijkstra searches and a
    // buffer of shortcut edges to insert after each contraction round.
    struct _ThreadData {
        _Heap heap;
        std::vector< _ImportEdge > insertedEdges;
        _ThreadData( NodeID nodes ): heap( nodes ) {
        }
    };

    // Per-node priority state: contraction depth and a random tie-breaking bias.
    struct _PriorityData {
        int depth;
        NodeID bias;
        _PriorityData() : depth(0), bias(0) { }
    };

    // Bookkeeping produced by a simulated contraction; feeds _Evaluate.
    struct _ContractionInformation {
        int edgesDeleted;
        int edgesAdded;
        int originalEdgesDeleted;
        int originalEdgesAdded;
        _ContractionInformation() {
            edgesAdded = edgesDeleted = originalEdgesAdded = originalEdgesDeleted = 0;
        }
    };

    // Partition predicate: nodes NOT in the independent set come first.
    struct _NodePartitionor {
        bool operator()( std::pair< NodeID, bool > nodeData ) {
            return !nodeData.second;
        }
    };

public:

    // Builds the internal dynamic graph from the input edge list:
    // expands each input edge into directed halves, sorts, drops self-loops,
    // keeps only the cheapest of parallel edges, and merges symmetric pairs
    // into bidirectional edges. eqf/oqf/df weight the priority terms used
    // later in _Evaluate.
    template< class InputEdge >
    Contractor( const int nodes, const std::vector< InputEdge >& inputEdges, const unsigned eqf = 8, const unsigned oqf = 4, const unsigned df = 2) : edgeQuotionFactor(eqf), originalQuotientFactor(oqf), depthFactor(df) {
        std::vector< _ImportEdge > edges;
        edges.reserve( 2 * inputEdges.size() );
        for ( typename std::vector< InputEdge >::const_iterator i = inputEdges.begin(), e = inputEdges.end(); i != e; ++i ) {
            _ImportEdge edge;
            edge.source = i->source();
            edge.target = i->target();
            // clamp to >= 1 so zero-weight input cannot produce a 0-distance edge
            edge.data.distance = std::max((int)i->weight(), 1 );
            assert( edge.data.distance > 0 );
#ifdef DEBUG
            if ( edge.data.distance > 24 * 60 * 60 * 10 ) {
                cout << "Edge Weight too large -> May lead to invalid CH" << endl;
                continue;
            }
#endif
            edge.data.shortcut = false;
            edge.data.middleName.nameID = i->name();
            edge.data.forward = i->isForward();
            edge.data.backward = i->isBackward();
            edge.data.originalEdges = 1;
            edges.push_back( edge );
            // also insert the reversed half with swapped directions
            std::swap( edge.source, edge.target );
            edge.data.forward = i->isBackward();
            edge.data.backward = i->isForward();
            edges.push_back( edge );
        }
        // std::vector< InputEdge >().swap( inputEdges ); //free memory
#ifdef _GLIBCXX_PARALLEL
        __gnu_parallel::sort( edges.begin(), edges.end() );
#else
        sort( edges.begin(), edges.end() );
#endif
        NodeID edge = 0; // write cursor: edges are compacted in place below
        for ( NodeID i = 0; i < edges.size(); ) {
            const NodeID source = edges[i].source;
            const NodeID target = edges[i].target;
            const NodeID middle = edges[i].data.middleName.nameID;
            // const short type = edges[i].data.type;
            // std::cout << "type: " << type << std::endl;
            // assert(type >= 0);
            //remove eigenloops
            if ( source == target ) {
                i++;
                continue;
            }
            _ImportEdge forwardEdge;
            _ImportEdge backwardEdge;
            forwardEdge.source = backwardEdge.source = source;
            forwardEdge.target = backwardEdge.target = target;
            forwardEdge.data.forward = backwardEdge.data.backward = true;
            forwardEdge.data.backward = backwardEdge.data.forward = false;
            // forwardEdge.data.type = backwardEdge.data.type = type;
            forwardEdge.data.middleName.nameID = backwardEdge.data.middleName.nameID = middle;
            forwardEdge.data.shortcut = backwardEdge.data.shortcut = false;
            forwardEdge.data.originalEdges = backwardEdge.data.originalEdges = 1;
            forwardEdge.data.distance = backwardEdge.data.distance = std::numeric_limits< int >::max();
            //remove parallel edges
            while ( i < edges.size() && edges[i].source == source && edges[i].target == target ) {
                if ( edges[i].data.forward )
                    forwardEdge.data.distance = std::min( edges[i].data.distance, forwardEdge.data.distance );
                if ( edges[i].data.backward )
                    backwardEdge.data.distance = std::min( edges[i].data.distance, backwardEdge.data.distance );
                i++;
            }
            //merge edges (s,t) and (t,s) into bidirectional edge
            if ( forwardEdge.data.distance == backwardEdge.data.distance ) {
                if ( (int)forwardEdge.data.distance != std::numeric_limits< int >::max() ) {
                    forwardEdge.data.backward = true;
                    edges[edge++] = forwardEdge;
                }
            } else { //insert seperate edges
                if ( (int)forwardEdge.data.distance != std::numeric_limits< int >::max() ) {
                    edges[edge++] = forwardEdge;
                }
                if ( (int)backwardEdge.data.distance != std::numeric_limits< int >::max() ) {
                    edges[edge++] = backwardEdge;
                }
            }
        }
        //cout << "[info " << __FILE__ << ":" << __LINE__ << "] contractor removed " << edges.size() - edge << " edges of " << edges.size() << endl;
        edges.resize( edge );
        _graph = new _DynamicGraph( nodes, edges );
        std::vector< _ImportEdge >().swap( edges ); // release capacity
    }

    ~Contractor() {
        delete _graph;
    }

    // Debug helper: asserts that every input edge's weight is found on some
    // edge incident to its source or target in the built graph.
    template< class InputEdge >
    void CheckForAllOrigEdges(std::vector< InputEdge >& inputEdges) {
        for(unsigned int i = 0; i < inputEdges.size(); i++) {
            bool found = false;
            _DynamicGraph::EdgeIterator eit = _graph->BeginEdges(inputEdges[i].source());
            for(;eit<_graph->EndEdges(inputEdges[i].source()); eit++) {
                if(_graph->GetEdgeData(eit).distance == inputEdges[i].weight())
                    found = true;
            }
            eit = _graph->BeginEdges(inputEdges[i].target());
            for(;eit<_graph->EndEdges(inputEdges[i].target()); eit++) {
                if(_graph->GetEdgeData(eit).distance == inputEdges[i].weight())
                    found = true;
            }
            assert(found);
        }
    }

    // Main contraction loop. Repeatedly: evaluate priorities, pick an
    // independent set of low-priority nodes, contract them in parallel,
    // merge the shortcut edges produced by each thread into the graph, and
    // re-evaluate the neighbours of contracted nodes.
    void Run() {
        const NodeID numberOfNodes = _graph->GetNumberOfNodes();
        Percent p (numberOfNodes);
        unsigned maxThreads = omp_get_max_threads();
        std::vector < _ThreadData* > threadData;
        for ( unsigned threadNum = 0; threadNum < maxThreads; ++threadNum ) {
            threadData.push_back( new _ThreadData( numberOfNodes ) );
        }
        //cout << "Contractor is using " << maxThreads << " threads" << endl;

        NodeID levelID = 0; // number of nodes contracted so far
        std::vector< std::pair< NodeID, bool > > remainingNodes( numberOfNodes );
        std::vector< double > nodePriority( numberOfNodes );
        std::vector< _PriorityData > nodeData( numberOfNodes );

        //initialize the variables
#pragma omp parallel for schedule ( guided )
        for ( int x = 0; x < ( int ) numberOfNodes; ++x )
            remainingNodes[x].first = x;
        // random shuffle order becomes the tie-breaking bias per node
        std::random_shuffle( remainingNodes.begin(), remainingNodes.end() );
        for ( int x = 0; x < ( int ) numberOfNodes; ++x )
            nodeData[remainingNodes[x].first].bias = x;

        //cout << "initializing elimination PQ ..." << flush;
#pragma omp parallel
        {
            _ThreadData* data = threadData[omp_get_thread_num()];
#pragma omp for schedule ( guided )
            for ( int x = 0; x < ( int ) numberOfNodes; ++x ) {
                nodePriority[x] = _Evaluate( data, &nodeData[x], x );
            }
        }
        //cout << "ok" << endl << "preprocessing ..." << flush;

        while ( levelID < numberOfNodes ) {
            const int last = ( int ) remainingNodes.size();
            //determine independent node set
#pragma omp parallel for schedule ( guided )
            for ( int i = 0; i < last; ++i ) {
                const NodeID node = remainingNodes[i].first;
                remainingNodes[i].second = _IsIndependent( _graph, nodePriority, nodeData, node );
            }
            _NodePartitionor functor;
            // move the independent set to positions [firstIndependent, last)
            const std::vector < std::pair < NodeID, bool > >::const_iterator first = stable_partition( remainingNodes.begin(), remainingNodes.end(), functor );
            const int firstIndependent = first - remainingNodes.begin();
            //contract independent nodes
#pragma omp parallel
            {
                _ThreadData* data = threadData[omp_get_thread_num()];
#pragma omp for schedule ( guided ) nowait
                for ( int position = firstIndependent ; position < last; ++position ) {
                    NodeID x = remainingNodes[position].first;
                    _Contract< false > ( data, x );
                    nodePriority[x] = -1; // mark as contracted
                }
                std::sort( data->insertedEdges.begin(), data->insertedEdges.end() );
            }
#pragma omp parallel
            {
                _ThreadData* data = threadData[omp_get_thread_num()];
#pragma omp for schedule ( guided ) nowait
                for ( int position = firstIndependent ; position < last; ++position ) {
                    NodeID x = remainingNodes[position].first;
                    _DeleteIncomingEdges( data, x );
                }
            }
            //insert new edges
            // Serial merge: combine duplicate shortcuts (same endpoints,
            // distance and middle node) by OR-ing their direction flags.
            for ( unsigned threadNum = 0; threadNum < maxThreads; ++threadNum ) {
                _ThreadData& data = *threadData[threadNum];
                for ( int i = 0; i < ( int ) data.insertedEdges.size(); ++i ) {
                    const _ImportEdge& edge = data.insertedEdges[i];
                    bool found = false;
                    for ( _DynamicGraph::EdgeIterator e = _graph->BeginEdges( edge.source ) ; e < _graph->EndEdges( edge.source ) ; ++e ) {
                        const NodeID target = _graph->GetTarget( e );
                        if ( target != edge.target )
                            continue;
                        _EdgeData& data = _graph->GetEdgeData( e );
                        if ( data.distance != edge.data.distance )
                            continue;
                        if ( data.shortcut != edge.data.shortcut )
                            continue;
                        if ( data.middleName.middle != edge.data.middleName.middle )
                            continue;
                        data.forward |= edge.data.forward;
                        data.backward |= edge.data.backward;
                        found = true;
                        break;
                    }
                    if ( !found )
                        _graph->InsertEdge( edge.source, edge.target, edge.data );
                }
                std::vector< _ImportEdge >().swap( data.insertedEdges );
            }
            //update priorities
#pragma omp parallel
            {
                _ThreadData* data = threadData[omp_get_thread_num()];
#pragma omp for schedule ( guided ) nowait
                for ( int position = firstIndependent ; position < last; ++position ) {
                    NodeID x = remainingNodes[position].first;
                    _UpdateNeighbours( &nodePriority, &nodeData, data, x );
                }
            }
            //remove contracted nodes from the pool
            levelID += last - firstIndependent;
            remainingNodes.resize( firstIndependent );
            // shrink-to-fit via the swap idiom
            std::vector< std::pair< NodeID, bool > >( remainingNodes ).swap( remainingNodes );
            p.printStatus(levelID);
        }
        for ( unsigned threadNum = 0; threadNum < maxThreads; threadNum++ ) {
            delete threadData[threadNum];
        }

        //cout << "[contractor] checking sanity of generated data ..." << flush;
        _CheckCH<_EdgeData>();
        //cout << "ok" << endl;
    }

    // Exports all edges (original and shortcuts) of the contracted graph
    // into the caller-supplied edge type.
    template< class Edge >
    void GetEdges( std::vector< Edge >& edges ) {
        NodeID numberOfNodes = _graph->GetNumberOfNodes();
        for ( NodeID node = 0; node < numberOfNodes; ++node ) {
            for ( _DynamicGraph::EdgeIterator edge = _graph->BeginEdges( node ), endEdges = _graph->EndEdges( node ); edge < endEdges; edge++ ) {
                const NodeID target = _graph->GetTarget( edge );
                const _EdgeData& data = _graph->GetEdgeData( edge );
                Edge newEdge;
                newEdge.source = node;
                newEdge.target = target;
                newEdge.data.distance = data.distance;
                newEdge.data.shortcut = data.shortcut;
                if(data.shortcut) {
                    // shortcut: export the contracted middle node, no type
                    newEdge.data.middleName.middle = data.middleName.middle;
                    newEdge.data.type = -1;
                } else {
                    // original edge: export the name id
                    newEdge.data.middleName.nameID = data.middleName.nameID;
                    // newEdge.data.type = data.type;
                    // assert(newEdge.data.type >= 0);
                }
                newEdge.data.forward = data.forward;
                newEdge.data.backward = data.backward;
                edges.push_back( newEdge );
            }
        }
    }

private:
    bool _ConstructCH( _DynamicGraph* _graph );

    // Bounded local forward search from `source` using the thread's heap.
    // Stops after settling more than numTargets nodes or passing maxDistance.
    void _Dijkstra( NodeID source, const int maxDistance, const unsigned numTargets, _ThreadData* data ){
        _Heap& heap = data->heap;
        unsigned nodes = 0;
        while ( heap.Size() > 0 ) {
            const NodeID node = heap.DeleteMin();
            const int distance = heap.GetKey( node );
            if ( nodes++ > numTargets )
                return;
            //Destination settled?
            if ( distance > maxDistance )
                return;

            //iterate over all edges of node
            for ( _DynamicGraph::EdgeIterator edge = _graph->BeginEdges( node ), endEdges = _graph->EndEdges( node ); edge != endEdges; ++edge ) {
                const _EdgeData& data = _graph->GetEdgeData( edge );
                if ( !data.forward )
                    continue;
                const NodeID to = _graph->GetTarget( edge );
                const int toDistance = distance + data.distance;

                //New Node discovered -> Add to Heap + Node Info Storage
                if ( !heap.WasInserted( to ) )
                    heap.Insert( to, toDistance, _HeapData() );

                //Found a shorter Path -> Update distance
                else if ( toDistance < heap.GetKey( to ) ) {
                    heap.DecreaseKey( to, toDistance );
                    //heap.GetData( to ).hops = hops + 1;
                }
            }
        }
    }

    // Priority of a node = weighted sum of the edge quotient, the original-edge
    // quotient (both from a simulated contraction) and the node's depth.
    double _Evaluate( _ThreadData* data, _PriorityData* nodeData, NodeID node ){
        _ContractionInformation stats;

        //perform simulated contraction
        _Contract< true > ( data, node, &stats );

        // Result will contain the priority
        if ( stats.edgesDeleted == 0 || stats.originalEdgesDeleted == 0 )
            return depthFactor * nodeData->depth;
        return edgeQuotionFactor * ((( double ) stats.edgesAdded ) / stats.edgesDeleted ) + originalQuotientFactor * ((( double ) stats.originalEdgesAdded ) / stats.originalEdgesDeleted ) + depthFactor * nodeData->depth;
    }

    // Sanity check: every shortcut must be expandable, i.e. both of its
    // halves (start<->middle, middle<->target) must exist in some direction.
    template< class Edge >
    bool _CheckCH() {
        NodeID numberOfNodes = _graph->GetNumberOfNodes();
        for ( NodeID node = 0; node < numberOfNodes; ++node ) {
            for ( _DynamicGraph::EdgeIterator edge = _graph->BeginEdges( node ), endEdges = _graph->EndEdges( node ); edge != endEdges; ++edge ) {
                const NodeID start = node;
                const NodeID target = _graph->GetTarget( edge );
                const _EdgeData& data = _graph->GetEdgeData( edge );
                const NodeID middle = data.middleName.middle;
                assert(start != target);
                if(data.shortcut) {
                    if(_graph->FindEdge(start, middle) == SPECIAL_EDGEID && _graph->FindEdge(middle, start) == SPECIAL_EDGEID) {
                        assert(false);
                        return false;
                    }
                    if(_graph->FindEdge(middle, target) == SPECIAL_EDGEID && _graph->FindEdge(target, middle) == SPECIAL_EDGEID) {
                        assert(false);
                        return false;
                    }
                }
            }
        }
        return true;
    }

    // Contracts `node`: for each incoming neighbour, runs a bounded witness
    // search; where no shorter witness path exists, records a shortcut
    // source->target through `node`. With Simulate == true only the statistics
    // are collected (into *stats) and no edges are buffered.
    template< bool Simulate >
    bool _Contract( _ThreadData* data, NodeID node, _ContractionInformation* stats = NULL ) {
        _Heap& heap = data->heap;
        for ( _DynamicGraph::EdgeIterator inEdge = _graph->BeginEdges( node ), endInEdges = _graph->EndEdges( node ); inEdge != endInEdges; ++inEdge ) {
            const _EdgeData& inData = _graph->GetEdgeData( inEdge );
            const NodeID source = _graph->GetTarget( inEdge );
            if ( Simulate ) {
                assert( stats != NULL );
                // a bidirectional edge counts twice
                unsigned factor = (inData.forward && inData.backward ? 2 : 1 );
                stats->edgesDeleted+=factor;
                stats->originalEdgesDeleted += factor*inData.originalEdges;
            }
            if ( !inData.backward )
                continue;
            // seed the witness search from `source`
            heap.Clear();
            heap.Insert( source, 0, _HeapData() );
            if ( node != source )
                heap.Insert( node, inData.distance, _HeapData() );
            int maxDistance = 0;
            //unsigned numTargets = 0;

            // seed all outgoing targets with the via-node path distance
            for ( _DynamicGraph::EdgeIterator outEdge = _graph->BeginEdges( node ), endOutEdges = _graph->EndEdges( node ); outEdge != endOutEdges; ++outEdge ) {
                const _EdgeData& outData = _graph->GetEdgeData( outEdge );
                if ( !outData.forward )
                    continue;
                const NodeID target = _graph->GetTarget( outEdge );
                const int pathDistance = inData.distance + outData.distance;
                maxDistance = std::max( maxDistance, pathDistance );
                if ( !heap.WasInserted( target ) )
                    heap.Insert( target, pathDistance, _HeapData(true) );
                else if ( pathDistance < heap.GetKey( target ) )
                    heap.DecreaseKey( target, pathDistance );
            }

            // tighter node budget during simulation than during real contraction
            if( Simulate )
                _Dijkstra( source, maxDistance, 500, data );
            else
                _Dijkstra( source, maxDistance, 1000, data );

            for ( _DynamicGraph::EdgeIterator outEdge = _graph->BeginEdges( node ), endOutEdges = _graph->EndEdges( node ); outEdge != endOutEdges; ++outEdge ) {
                const _EdgeData& outData = _graph->GetEdgeData( outEdge );
                if ( !outData.forward )
                    continue;
                const NodeID target = _graph->GetTarget( outEdge );
                const int pathDistance = inData.distance + outData.distance;
                const int distance = heap.GetKey( target );
                // no shorter witness found -> the shortcut is needed
                if ( pathDistance <= distance ) {
                    if ( Simulate ) {
                        assert( stats != NULL );
                        stats->edgesAdded++;
                        stats->originalEdgesAdded += ( outData.originalEdges + inData.originalEdges );
                    } else {
                        _ImportEdge newEdge;
                        newEdge.source = source;
                        newEdge.target = target;
                        newEdge.data.distance = pathDistance;
                        newEdge.data.forward = true;
                        newEdge.data.backward = false;
                        newEdge.data.middleName.middle = node;
                        newEdge.data.shortcut = true;
                        newEdge.data.originalEdges = outData.originalEdges + inData.originalEdges;
                        data->insertedEdges.push_back( newEdge );
                        // buffer the reversed half as well
                        std::swap( newEdge.source, newEdge.target );
                        newEdge.data.forward = false;
                        newEdge.data.backward = true;
                        data->insertedEdges.push_back( newEdge );
                    }
                }
            }
        }
        return true;
    }

    // Removes all edges between `node` and each of its (deduplicated) neighbours.
    bool _DeleteIncomingEdges( _ThreadData* data, NodeID node ) {
        std::vector < NodeID > neighbours;

        //find all neighbours
        for ( _DynamicGraph::EdgeIterator e = _graph->BeginEdges( node ) ; e < _graph->EndEdges( node ) ; ++e ) {
            const NodeID u = _graph->GetTarget( e );
            if ( u == node )
                continue;
            neighbours.push_back( u );
        }

        //eliminate duplicate entries ( forward + backward edges )
        std::sort( neighbours.begin(), neighbours.end() );
        neighbours.resize( std::unique( neighbours.begin(), neighbours.end() ) - neighbours.begin() );

        for ( int i = 0, e = ( int ) neighbours.size(); i < e; ++i ) {
            const NodeID u = neighbours[i];
            _graph->DeleteEdgesTo( u, node );
        }

        return true;
    }

    // After contracting `node`: raise each neighbour's depth and re-evaluate
    // its priority.
    bool _UpdateNeighbours( std::vector< double >* priorities, std::vector< _PriorityData >* nodeData, _ThreadData* data, NodeID node ) {
        std::vector < NodeID > neighbours;

        //find all neighbours
        for ( _DynamicGraph::EdgeIterator e = _graph->BeginEdges( node ) ; e < _graph->EndEdges( node ) ; ++e ) {
            const NodeID u = _graph->GetTarget( e );
            if ( u == node )
                continue;
            neighbours.push_back( u );
            ( *nodeData )[u].depth = std::max(( *nodeData )[node].depth + 1, ( *nodeData )[u].depth );
        }

        //eliminate duplicate entries ( forward + backward edges )
        std::sort( neighbours.begin(), neighbours.end() );
        neighbours.resize( std::unique( neighbours.begin(), neighbours.end() ) - neighbours.begin() );

        for ( int i = 0, e = ( int ) neighbours.size(); i < e; ++i ) {
            const NodeID u = neighbours[i];
            ( *priorities )[u] = _Evaluate( data, &( *nodeData )[u], u );
        }

        return true;
    }

    // A node is independent iff no node within 2 hops has a strictly lower
    // priority (ties broken by the per-node bias), so independent nodes can
    // be contracted concurrently without interfering.
    bool _IsIndependent( const _DynamicGraph* _graph, const std::vector< double >& priorities, const std::vector< _PriorityData >& nodeData, NodeID node ) {
        const double priority = priorities[node];

        std::vector< NodeID > neighbours;

        for ( _DynamicGraph::EdgeIterator e = _graph->BeginEdges( node ) ; e < _graph->EndEdges( node ) ; ++e ) {
            const NodeID target = _graph->GetTarget( e );
            const double targetPriority = priorities[target];
            assert( targetPriority >= 0 );
            //found a neighbour with lower priority?
            if ( priority > targetPriority )
                return false;
            //tie breaking
            if ( priority == targetPriority && nodeData[node].bias < nodeData[target].bias )
                return false;
            neighbours.push_back( target );
        }

        std::sort( neighbours.begin(), neighbours.end() );
        neighbours.resize( std::unique( neighbours.begin(), neighbours.end() ) - neighbours.begin() );

        //examine all neighbours that are at most 2 hops away
        for ( std::vector< NodeID >::const_iterator i = neighbours.begin(), lastNode = neighbours.end(); i != lastNode; ++i ) {
            const NodeID u = *i;

            for ( _DynamicGraph::EdgeIterator e = _graph->BeginEdges( u ) ; e < _graph->EndEdges( u ) ; ++e ) {
                const NodeID target = _graph->GetTarget( e );

                const double targetPriority = priorities[target];
                assert( targetPriority >= 0 );
                //found a neighbour with lower priority?
                if ( priority > targetPriority )
                    return false;
                //tie breaking
                if ( priority == targetPriority && nodeData[node].bias < nodeData[target].bias )
                    return false;
            }
        }

        return true;
    }

    _DynamicGraph* _graph;
    std::vector<NodeID> * _components; // NOTE(review): never assigned or read in this file
    unsigned edgeQuotionFactor;        // weight of the edge quotient in _Evaluate
    unsigned originalQuotientFactor;   // weight of the original-edge quotient
    unsigned depthFactor;              // weight of the node depth

};

#endif // CONTRACTOR_H_INCLUDED
#ifndef CONTRACTOR_H_INCLUDED #define CONTRACTOR_H_INCLUDED #ifdef _GLIBCXX_PARALLEL #include <parallel/algorithm> #else #include <algorithm> #endif #include "../DataStructures/DynamicGraph.h" #include "../DataStructures/Percent.h" #include "../DataStructures/BinaryHeap.h" #include <ctime> #include <vector> #include <queue> #include <set> #include <stack> #include <limits> class Contractor { private: union _MiddleName { NodeID middle; NodeID nameID; }; struct _EdgeData { unsigned distance; unsigned originalEdges:29; bool shortcut:1; bool forward:1; bool backward:1; //short type:6; bool forwardTurn:1; bool backwardTurn:1; _MiddleName middleName; } data; struct _HeapData { bool target; _HeapData(): target(false) { } _HeapData(bool t): target(t) { } }; typedef DynamicGraph < _EdgeData > _DynamicGraph; typedef BinaryHeap < NodeID, NodeID, int, _HeapData > _Heap; typedef _DynamicGraph::InputEdge _ImportEdge; struct _ThreadData { _Heap heap; std: : vector < _ImportEdge > insertedEdges; _ThreadData(NodeID nodes): heap(nodes) { } }; struct _PriorityData { int depth; NodeID bias; _PriorityData(): depth(0), bias(0) { } }; struct _ContractionInformation { int edgesDeleted; int edgesAdded; int originalEdgesDeleted; int originalEdgesAdded; _ContractionInformation() { edgesAdded = edgesDeleted = originalEdgesAdded = originalEdgesDeleted = 0; } }; struct _NodePartitionor { bool operator() (std::pair < NodeID, bool > nodeData) { return !nodeData.second; } }; public: template < class InputEdge > Contractor(const int nodes, const std: : vector < InputEdge > &inputEdges, const unsigned eqf = 8, const unsigned oqf = 4, const unsigned df = 2):edgeQuotionFactor(eqf), originalQuotientFactor(oqf), depthFactor(df) { std: : vector < _ImportEdge > edges; edges.reserve(2 * inputEdges.size()); for (typename std: : vector < InputEdge >: :const_iterator i = inputEdges.begin(), e = inputEdges.end(); i != e; ++i) { _ImportEdge edge; edge.source = i->source(); edge.target = i->target(); 
edge.data.distance = std: :max((int)i->weight(), 1); assert(edge.data.distance > 0); #ifdef DEBUG if (edge.data.distance > 24 * 60 * 60 * 10) { cout << "Edge Weight too large -> May lead to invalid CH" << endl; continue; } #endif edge.data.shortcut = false; edge.data.middleName.nameID = i->name(); edge.data.forward = i->isForward(); edge.data.backward = i->isBackward(); edge.data.originalEdges = 1; edges.push_back(edge); std: : swap(edge.source, edge.target); edge.data.forward = i->isBackward(); edge.data.backward = i->isForward(); edges.push_back(edge); } //std: :vector < InputEdge > ().swap(inputEdges); //free memory #ifdef _GLIBCXX_PARALLEL __gnu_parallel: :sort(edges.begin(), edges.end()); #else sort(edges.begin(), edges.end()); #endif NodeID edge = 0; for (NodeID i = 0; i < edges.size();) { const NodeID source = edges[i].source; const NodeID target = edges[i].target; const NodeID middle = edges[i].data.middleName.nameID; //const short type = edges[i].data.type; //std: : cout << "type: " << type << std: :endl; //assert(type >= 0); //remove eigenloops if (source == target) { i++; continue; } _ImportEdge forwardEdge; _ImportEdge backwardEdge; forwardEdge.source = backwardEdge.source = source; forwardEdge.target = backwardEdge.target = target; forwardEdge.data.forward = backwardEdge.data.backward = true; forwardEdge.data.backward = backwardEdge.data.forward = false; //forwardEdge.data.type = backwardEdge.data.type = type; forwardEdge.data.middleName.nameID = backwardEdge.data.middleName.nameID = middle; forwardEdge.data.shortcut = backwardEdge.data.shortcut = false; forwardEdge.data.originalEdges = backwardEdge.data.originalEdges = 1; forwardEdge.data.distance = backwardEdge.data.distance = std: :numeric_limits < int >::max(); //remove parallel edges while (i < edges.size() && edges[i].source == source && edges[i].target == target) { if (edges[i].data.forward) forwardEdge.data.distance = std: :min(edges[i].data.distance, forwardEdge.data.distance); if 
(edges[i].data.backward) backwardEdge.data.distance = std: :min(edges[i].data.distance, backwardEdge.data.distance); i++; } //merge edges(s, t) and(t, s) into bidirectional edge if (forwardEdge.data.distance == backwardEdge.data.distance) { if ((int)forwardEdge.data.distance != std::numeric_limits < int >::max()) { forwardEdge.data.backward = true; edges[edge++] = forwardEdge; } } else { //insert seperate edges if ((int)forwardEdge.data.distance != std: : numeric_limits < int >: :max()) { edges[edge++] = forwardEdge; } if ((int)backwardEdge.data.distance != std: : numeric_limits < int >: :max()) { edges[edge++] = backwardEdge; } } } //cout << "[info " << __FILE__ << ":" << __LINE__ << "] contractor removed " << edges.size() - edge << " edges of " << edges.size() << endl; edges.resize(edge); _graph = new _DynamicGraph(nodes, edges); std: : vector < _ImportEdge > ().swap(edges); } ~Contractor() { delete _graph; } template < class InputEdge > void CheckForAllOrigEdges(std::vector < InputEdge > &inputEdges) { for (unsigned int i = 0; i < inputEdges.size(); i++) { bool found = false; _DynamicGraph: : EdgeIterator eit = _graph->BeginEdges(inputEdges[i].source()); for (; eit < _graph->EndEdges(inputEdges[i].source()); eit++) { if (_graph->GetEdgeData(eit).distance == inputEdges[i].weight()) found = true; } eit = _graph->BeginEdges(inputEdges[i].target()); for (; eit < _graph->EndEdges(inputEdges[i].target()); eit++) { if (_graph->GetEdgeData(eit).distance == inputEdges[i].weight()) found = true; } assert(found); } } void Run() { const NodeID numberOfNodes = _graph->GetNumberOfNodes(); Percent p(numberOfNodes); unsigned maxThreads = omp_get_max_threads(); std: : vector < _ThreadData * >threadData; for (unsigned threadNum = 0; threadNum < maxThreads; ++threadNum) { threadData.push_back(new _ThreadData(numberOfNodes)); } //cout << "Contractor is using " << maxThreads << " threads" << endl; NodeID levelID = 0; std: : vector < std: :pair < NodeID, bool > 
>remainingNodes(numberOfNodes); std: : vector < double >nodePriority(numberOfNodes); std: : vector < _PriorityData > nodeData(numberOfNodes); //initialize the variables for (int x = 0; x < (int)numberOfNodes; ++x) remainingNodes[x].first = x; std: : random_shuffle(remainingNodes.begin(), remainingNodes.end()); for (int x = 0; x < (int)numberOfNodes; ++x) nodeData[remainingNodes[x].first].bias = x; //cout << "initializing elimination PQ ..." << flush; _ThreadData *data = threadData[omp_get_thread_num()]; for (int x = 0; x < (int)numberOfNodes; ++x) { nodePriority[x] = _Evaluate(data, &nodeData[x], x); } //cout << "ok" << endl << "preprocessing ..." << flush; while (levelID < numberOfNodes) { const int last = (int)remainingNodes.size(); //determine independent node set for (int i = 0; i < last; ++i) { const NodeID node = remainingNodes[i].first; remainingNodes[i].second = _IsIndependent(_graph, nodePriority, nodeData, node); } _NodePartitionor functor; const std::vector < std::pair < NodeID, bool > >::const_iterator first = stable_partition(remainingNodes.begin(), remainingNodes.end(), functor); const int firstIndependent = first - remainingNodes.begin(); //contract independent nodes _ThreadData * data = threadData[omp_get_thread_num()]; for (int position = firstIndependent; position < last; ++position) { NodeID x = remainingNodes[position].first; _Contract < false > (data, x); nodePriority[x] = -1; } std: : sort(data->insertedEdges.begin(), data->insertedEdges.end()); _ThreadData *data = threadData[omp_get_thread_num()]; for (int position = firstIndependent; position < last; ++position) { NodeID x = remainingNodes[position].first; _DeleteIncomingEdges(data, x); } //insert new edges for (unsigned threadNum = 0; threadNum < maxThreads; ++threadNum) { _ThreadData & data = *threadData[threadNum]; for (int i = 0; i < (int)data.insertedEdges.size(); ++i) { const _ImportEdge & edge = data.insertedEdges[i]; bool found = false; for (_DynamicGraph: :EdgeIterator e = 
_graph->BeginEdges(edge.source); e < _graph->EndEdges(edge.source); ++e) { const NodeID target = _graph->GetTarget(e); if (target != edge.target) continue; _EdgeData & data = _graph->GetEdgeData(e); if (data.distance != edge.data.distance) continue; if (data.shortcut != edge.data.shortcut) continue; if (data.middleName.middle != edge.data.middleName.middle) continue; data.forward |= edge.data.forward; data.backward |= edge.data.backward; found = true; break; } if (!found) _graph->InsertEdge(edge.source, edge.target, edge.data); } std: : vector < _ImportEdge > ().swap(data.insertedEdges); } //update priorities _ThreadData * data = threadData[omp_get_thread_num()]; for (int position = firstIndependent; position < last; ++position) { NodeID x = remainingNodes[position].first; _UpdateNeighbours(&nodePriority, &nodeData, data, x); } //remove contracted nodes from the pool levelID += last - firstIndependent; remainingNodes.resize(firstIndependent); std: : vector < std: :pair < NodeID, bool > >(remainingNodes).swap(remainingNodes); p.printStatus(levelID); } for (unsigned threadNum = 0; threadNum < maxThreads; threadNum++) { delete threadData[threadNum]; } //cout << "[contractor] checking sanity of generated data ..." 
<< flush; _CheckCH < _EdgeData > (); //cout << "ok" << endl; } template < class Edge > void GetEdges(std::vector < Edge > &edges) { NodeID numberOfNodes = _graph->GetNumberOfNodes(); for (NodeID node = 0; node < numberOfNodes; ++node) { for (_DynamicGraph: :EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge < endEdges; edge++) { const NodeID target = _graph->GetTarget(edge); const _EdgeData & data = _graph->GetEdgeData(edge); Edge newEdge; newEdge.source = node; newEdge.target = target; newEdge.data.distance = data.distance; newEdge.data.shortcut = data.shortcut; if (data.shortcut) { newEdge.data.middleName.middle = data.middleName.middle; newEdge.data.type = -1; } else { newEdge.data.middleName.nameID = data.middleName.nameID; //newEdge.data.type = data.type; //assert(newEdge.data.type >= 0); } newEdge.data.forward = data.forward; newEdge.data.backward = data.backward; edges.push_back(newEdge); } } } private: bool _ConstructCH(_DynamicGraph * _graph); void _Dijkstra(NodeID source, const int maxDistance, const unsigned numTargets, _ThreadData * data) { _Heap & heap = data->heap; unsigned nodes = 0; while (heap.Size() > 0) { const NodeID node = heap.DeleteMin(); const int distance = heap.GetKey(node); if (nodes++ > numTargets) return; //Destination settled ? 
if (distance > maxDistance) return; //iterate over all edges of node for (_DynamicGraph: : EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge != endEdges; ++edge) { const _EdgeData & data = _graph->GetEdgeData(edge); if (!data.forward) continue; const NodeID to = _graph->GetTarget(edge); const int toDistance = distance + data.distance; //New Node discovered->Add to Heap + Node Info Storage if (!heap.WasInserted(to)) heap.Insert(to, toDistance, _HeapData()); //Found a shorter Path->Update distance else if (toDistance < heap.GetKey(to)) { heap.DecreaseKey(to, toDistance); //heap.GetData(to).hops = hops + 1; } } } } double _Evaluate(_ThreadData * data, _PriorityData * nodeData, NodeID node) { _ContractionInformation stats; //perform simulated contraction _Contract < true > (data, node, &stats); //Result will contain the priority if (stats.edgesDeleted == 0 || stats.originalEdgesDeleted == 0) return depthFactor * nodeData->depth; return edgeQuotionFactor * (((double)stats.edgesAdded) / stats.edgesDeleted) + originalQuotientFactor * (((double)stats.originalEdgesAdded) / stats.originalEdgesDeleted) + depthFactor * nodeData->depth; } template < class Edge > bool _CheckCH() { NodeID numberOfNodes = _graph->GetNumberOfNodes(); for (NodeID node = 0; node < numberOfNodes; ++node) { for (_DynamicGraph: :EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge != endEdges; ++edge) { const NodeID start = node; const NodeID target = _graph->GetTarget(edge); const _EdgeData & data = _graph->GetEdgeData(edge); const NodeID middle = data.middleName.middle; assert(start != target); if (data.shortcut) { if (_graph->FindEdge(start, middle) == SPECIAL_EDGEID && _graph->FindEdge(middle, start) == SPECIAL_EDGEID) { assert(false); return false; } if (_graph->FindEdge(middle, target) == SPECIAL_EDGEID && _graph->FindEdge(target, middle) == SPECIAL_EDGEID) { assert(false); return false; } } } } return true; } template < bool 
Simulate > bool _Contract(_ThreadData * data, NodeID node, _ContractionInformation * stats = NULL) { _Heap & heap = data->heap; for (_DynamicGraph: :EdgeIterator inEdge = _graph->BeginEdges(node), endInEdges = _graph->EndEdges(node); inEdge != endInEdges; ++inEdge) { const _EdgeData & inData = _graph->GetEdgeData(inEdge); const NodeID source = _graph->GetTarget(inEdge); if (Simulate) { assert(stats != NULL); unsigned factor = (inData.forward && inData.backward ? 2 : 1); stats->edgesDeleted += factor; stats->originalEdgesDeleted += factor * inData.originalEdges; } if (!inData.backward) continue; heap.Clear(); heap.Insert(source, 0, _HeapData()); if (node != source) heap.Insert(node, inData.distance, _HeapData()); int maxDistance = 0; //unsigned numTargets = 0; for (_DynamicGraph: :EdgeIterator outEdge = _graph->BeginEdges(node), endOutEdges = _graph->EndEdges(node); outEdge != endOutEdges; ++outEdge) { const _EdgeData & outData = _graph->GetEdgeData(outEdge); if (!outData.forward) continue; const NodeID target = _graph->GetTarget(outEdge); const int pathDistance = inData.distance + outData.distance; maxDistance = std: :max(maxDistance, pathDistance); if (!heap.WasInserted(target)) heap.Insert(target, pathDistance, _HeapData(true)); else if (pathDistance < heap.GetKey(target)) heap.DecreaseKey(target, pathDistance); } if (Simulate) _Dijkstra(source, maxDistance, 500, data); else _Dijkstra(source, maxDistance, 1000, data); for (_DynamicGraph: :EdgeIterator outEdge = _graph->BeginEdges(node), endOutEdges = _graph->EndEdges(node); outEdge != endOutEdges; ++outEdge) { const _EdgeData & outData = _graph->GetEdgeData(outEdge); if (!outData.forward) continue; const NodeID target = _graph->GetTarget(outEdge); const int pathDistance = inData.distance + outData.distance; const int distance = heap.GetKey(target); if (pathDistance <= distance) { if (Simulate) { assert(stats != NULL); stats->edgesAdded++; stats->originalEdgesAdded += (outData.originalEdges + 
inData.originalEdges); } else { _ImportEdge newEdge; newEdge.source = source; newEdge.target = target; newEdge.data.distance = pathDistance; newEdge.data.forward = true; newEdge.data.backward = false; newEdge.data.middleName.middle = node; newEdge.data.shortcut = true; newEdge.data.originalEdges = outData.originalEdges + inData.originalEdges; data->insertedEdges.push_back(newEdge); std: : swap(newEdge.source, newEdge.target); newEdge.data.forward = false; newEdge.data.backward = true; data->insertedEdges.push_back(newEdge); } } } } return true; } bool _DeleteIncomingEdges(_ThreadData * data, NodeID node) { std::vector < NodeID > neighbours; //find all neighbours for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID u = _graph->GetTarget(e); if (u == node) continue; neighbours.push_back(u); } //eliminate duplicate entries(forward + backward edges) std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); for (int i = 0, e = (int)neighbours.size(); i < e; ++i) { const NodeID u = neighbours[i]; _graph->DeleteEdgesTo(u, node); } return true; } bool _UpdateNeighbours(std::vector < double >*priorities, std::vector < _PriorityData > *nodeData, _ThreadData * data, NodeID node) { std::vector < NodeID > neighbours; //find all neighbours for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID u = _graph->GetTarget(e); if (u == node) continue; neighbours.push_back(u); (*nodeData)[u].depth = std: : max((*nodeData)[node].depth + 1, (*nodeData)[u].depth); } //eliminate duplicate entries(forward + backward edges) std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); for (int i = 0, e = (int)neighbours.size(); i < e; ++i) { const NodeID u = neighbours[i]; (*priorities)[u] = _Evaluate(data, 
&(*nodeData)[u], u); } return true; } bool _IsIndependent(const _DynamicGraph * _graph, const std::vector < double >&priorities, const std::vector < _PriorityData > &nodeData, NodeID node) { const double priority = priorities[node]; std: : vector < NodeID > neighbours; for (_DynamicGraph: : EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID target = _graph->GetTarget(e); const double targetPriority = priorities[target]; assert(targetPriority >= 0); //found a neighbour with lower priority ? if (priority > targetPriority) return false; //tie breaking if (priority == targetPriority && nodeData[node].bias < nodeData[target].bias) return false; neighbours.push_back(target); } std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); //examine all neighbours that are at most 2 hops away for (std: : vector < NodeID >: :const_iterator i = neighbours.begin(), lastNode = neighbours.end(); i != lastNode; ++i) { const NodeID u = *i; for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(u); e < _graph->EndEdges(u); ++e) { const NodeID target = _graph->GetTarget(e); const double targetPriority = priorities[target]; assert(targetPriority >= 0); //found a neighbour with lower priority ? if (priority > targetPriority) return false; //tie breaking if (priority == targetPriority && nodeData[node].bias < nodeData[target].bias) return false; } } return true; } _DynamicGraph *_graph; std: :vector < NodeID > *_components; unsigned edgeQuotionFactor; unsigned originalQuotientFactor; unsigned depthFactor; }; #endif /* // CONTRACTOR_H_INCLUDED */
#ifndef CONTRACTOR_H_INCLUDED #define CONTRACTOR_H_INCLUDED #ifdef _GLIBCXX_PARALLEL #include <parallel/algorithm> #else #include <algorithm> #endif #include "../DataStructures/DynamicGraph.h" #include "../DataStructures/Percent.h" #include "../DataStructures/BinaryHeap.h" #include <ctime> #include <vector> #include <queue> #include <set> #include <stack> #include <limits> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #define omp_get_max_threads() 1 #endif class Contractor { private: union _MiddleName { NodeID middle; NodeID nameID; }; struct _EdgeData { unsigned distance; unsigned originalEdges:29; bool shortcut:1; bool forward:1; bool backward:1; //short type:6; bool forwardTurn:1; bool backwardTurn:1; _MiddleName middleName; } data; struct _HeapData { bool target; _HeapData(): target(false) { } _HeapData(bool t): target(t) { } }; typedef DynamicGraph < _EdgeData > _DynamicGraph; typedef BinaryHeap < NodeID, NodeID, int, _HeapData > _Heap; typedef _DynamicGraph::InputEdge _ImportEdge; struct _ThreadData { _Heap heap; std: : vector < _ImportEdge > insertedEdges; _ThreadData(NodeID nodes): heap(nodes) { } }; struct _PriorityData { int depth; NodeID bias; _PriorityData(): depth(0), bias(0) { } }; struct _ContractionInformation { int edgesDeleted; int edgesAdded; int originalEdgesDeleted; int originalEdgesAdded; _ContractionInformation() { edgesAdded = edgesDeleted = originalEdgesAdded = originalEdgesDeleted = 0; } }; struct _NodePartitionor { bool operator() (std::pair < NodeID, bool > nodeData) { return !nodeData.second; } }; public: template < class InputEdge > Contractor(const int nodes, const std: : vector < InputEdge > &inputEdges, const unsigned eqf = 8, const unsigned oqf = 4, const unsigned df = 2):edgeQuotionFactor(eqf), originalQuotientFactor(oqf), depthFactor(df) { std: : vector < _ImportEdge > edges; edges.reserve(2 * inputEdges.size()); for (typename std: : vector < InputEdge >: :const_iterator i = inputEdges.begin(), e = 
inputEdges.end(); i != e; ++i) { _ImportEdge edge; edge.source = i->source(); edge.target = i->target(); edge.data.distance = std: :max((int)i->weight(), 1); assert(edge.data.distance > 0); #ifdef DEBUG if (edge.data.distance > 24 * 60 * 60 * 10) { cout << "Edge Weight too large -> May lead to invalid CH" << endl; continue; } #endif edge.data.shortcut = false; edge.data.middleName.nameID = i->name(); edge.data.forward = i->isForward(); edge.data.backward = i->isBackward(); edge.data.originalEdges = 1; edges.push_back(edge); std: : swap(edge.source, edge.target); edge.data.forward = i->isBackward(); edge.data.backward = i->isForward(); edges.push_back(edge); } //std: :vector < InputEdge > ().swap(inputEdges); //free memory #ifdef _GLIBCXX_PARALLEL __gnu_parallel: :sort(edges.begin(), edges.end()); #else sort(edges.begin(), edges.end()); #endif NodeID edge = 0; for (NodeID i = 0; i < edges.size();) { const NodeID source = edges[i].source; const NodeID target = edges[i].target; const NodeID middle = edges[i].data.middleName.nameID; //const short type = edges[i].data.type; //std: : cout << "type: " << type << std: :endl; //assert(type >= 0); //remove eigenloops if (source == target) { i++; continue; } _ImportEdge forwardEdge; _ImportEdge backwardEdge; forwardEdge.source = backwardEdge.source = source; forwardEdge.target = backwardEdge.target = target; forwardEdge.data.forward = backwardEdge.data.backward = true; forwardEdge.data.backward = backwardEdge.data.forward = false; //forwardEdge.data.type = backwardEdge.data.type = type; forwardEdge.data.middleName.nameID = backwardEdge.data.middleName.nameID = middle; forwardEdge.data.shortcut = backwardEdge.data.shortcut = false; forwardEdge.data.originalEdges = backwardEdge.data.originalEdges = 1; forwardEdge.data.distance = backwardEdge.data.distance = std: :numeric_limits < int >::max(); //remove parallel edges while (i < edges.size() && edges[i].source == source && edges[i].target == target) { if (edges[i].data.forward) 
forwardEdge.data.distance = std: :min(edges[i].data.distance, forwardEdge.data.distance); if (edges[i].data.backward) backwardEdge.data.distance = std: :min(edges[i].data.distance, backwardEdge.data.distance); i++; } //merge edges(s, t) and(t, s) into bidirectional edge if (forwardEdge.data.distance == backwardEdge.data.distance) { if ((int)forwardEdge.data.distance != std::numeric_limits < int >::max()) { forwardEdge.data.backward = true; edges[edge++] = forwardEdge; } } else { //insert seperate edges if ((int)forwardEdge.data.distance != std: : numeric_limits < int >: :max()) { edges[edge++] = forwardEdge; } if ((int)backwardEdge.data.distance != std: : numeric_limits < int >: :max()) { edges[edge++] = backwardEdge; } } } //cout << "[info " << __FILE__ << ":" << __LINE__ << "] contractor removed " << edges.size() - edge << " edges of " << edges.size() << endl; edges.resize(edge); _graph = new _DynamicGraph(nodes, edges); std: : vector < _ImportEdge > ().swap(edges); } ~Contractor() { delete _graph; } template < class InputEdge > void CheckForAllOrigEdges(std::vector < InputEdge > &inputEdges) { for (unsigned int i = 0; i < inputEdges.size(); i++) { bool found = false; _DynamicGraph: : EdgeIterator eit = _graph->BeginEdges(inputEdges[i].source()); for (; eit < _graph->EndEdges(inputEdges[i].source()); eit++) { if (_graph->GetEdgeData(eit).distance == inputEdges[i].weight()) found = true; } eit = _graph->BeginEdges(inputEdges[i].target()); for (; eit < _graph->EndEdges(inputEdges[i].target()); eit++) { if (_graph->GetEdgeData(eit).distance == inputEdges[i].weight()) found = true; } assert(found); } } void Run() { const NodeID numberOfNodes = _graph->GetNumberOfNodes(); Percent p(numberOfNodes); unsigned maxThreads = omp_get_max_threads(); std: : vector < _ThreadData * >threadData; for (unsigned threadNum = 0; threadNum < maxThreads; ++threadNum) { threadData.push_back(new _ThreadData(numberOfNodes)); } //cout << "Contractor is using " << maxThreads << " threads" << 
endl; NodeID levelID = 0; std: : vector < std: :pair < NodeID, bool > >remainingNodes(numberOfNodes); std: : vector < double >nodePriority(numberOfNodes); std: : vector < _PriorityData > nodeData(numberOfNodes); //initialize the variables #pragma omp parallel for schedule ( guided ) for (int x = 0; x < (int)numberOfNodes; ++x) remainingNodes[x].first = x; std: : random_shuffle(remainingNodes.begin(), remainingNodes.end()); for (int x = 0; x < (int)numberOfNodes; ++x) nodeData[remainingNodes[x].first].bias = x; //cout << "initializing elimination PQ ..." << flush; #pragma omp parallel { _ThreadData *data = threadData[omp_get_thread_num()]; #pragma omp for schedule ( guided ) for (int x = 0; x < (int)numberOfNodes; ++x) { nodePriority[x] = _Evaluate(data, &nodeData[x], x); } } //cout << "ok" << endl << "preprocessing ..." << flush; while (levelID < numberOfNodes) { const int last = (int)remainingNodes.size(); //determine independent node set #pragma omp parallel for schedule ( guided ) for (int i = 0; i < last; ++i) { const NodeID node = remainingNodes[i].first; remainingNodes[i].second = _IsIndependent(_graph, nodePriority, nodeData, node); } _NodePartitionor functor; const std::vector < std::pair < NodeID, bool > >::const_iterator first = stable_partition(remainingNodes.begin(), remainingNodes.end(), functor); const int firstIndependent = first - remainingNodes.begin(); //contract independent nodes #pragma omp parallel { _ThreadData *data = threadData[omp_get_thread_num()]; #pragma omp for schedule ( guided ) nowait for (int position = firstIndependent; position < last; ++position) { NodeID x = remainingNodes[position].first; _Contract < false > (data, x); nodePriority[x] = -1; } std: : sort(data->insertedEdges.begin(), data->insertedEdges.end()); } #pragma omp parallel { _ThreadData *data = threadData[omp_get_thread_num()]; #pragma omp for schedule ( guided ) nowait for (int position = firstIndependent; position < last; ++position) { NodeID x = 
remainingNodes[position].first; _DeleteIncomingEdges(data, x); } } //insert new edges for (unsigned threadNum = 0; threadNum < maxThreads; ++threadNum) { _ThreadData & data = *threadData[threadNum]; for (int i = 0; i < (int)data.insertedEdges.size(); ++i) { const _ImportEdge & edge = data.insertedEdges[i]; bool found = false; for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(edge.source); e < _graph->EndEdges(edge.source); ++e) { const NodeID target = _graph->GetTarget(e); if (target != edge.target) continue; _EdgeData & data = _graph->GetEdgeData(e); if (data.distance != edge.data.distance) continue; if (data.shortcut != edge.data.shortcut) continue; if (data.middleName.middle != edge.data.middleName.middle) continue; data.forward |= edge.data.forward; data.backward |= edge.data.backward; found = true; break; } if (!found) _graph->InsertEdge(edge.source, edge.target, edge.data); } std: : vector < _ImportEdge > ().swap(data.insertedEdges); } //update priorities #pragma omp parallel { _ThreadData *data = threadData[omp_get_thread_num()]; #pragma omp for schedule ( guided ) nowait for (int position = firstIndependent; position < last; ++position) { NodeID x = remainingNodes[position].first; _UpdateNeighbours(&nodePriority, &nodeData, data, x); } } //remove contracted nodes from the pool levelID += last - firstIndependent; remainingNodes.resize(firstIndependent); std: : vector < std: :pair < NodeID, bool > >(remainingNodes).swap(remainingNodes); p.printStatus(levelID); } for (unsigned threadNum = 0; threadNum < maxThreads; threadNum++) { delete threadData[threadNum]; } //cout << "[contractor] checking sanity of generated data ..." 
<< flush; _CheckCH < _EdgeData > (); //cout << "ok" << endl; } template < class Edge > void GetEdges(std::vector < Edge > &edges) { NodeID numberOfNodes = _graph->GetNumberOfNodes(); for (NodeID node = 0; node < numberOfNodes; ++node) { for (_DynamicGraph: :EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge < endEdges; edge++) { const NodeID target = _graph->GetTarget(edge); const _EdgeData & data = _graph->GetEdgeData(edge); Edge newEdge; newEdge.source = node; newEdge.target = target; newEdge.data.distance = data.distance; newEdge.data.shortcut = data.shortcut; if (data.shortcut) { newEdge.data.middleName.middle = data.middleName.middle; newEdge.data.type = -1; } else { newEdge.data.middleName.nameID = data.middleName.nameID; //newEdge.data.type = data.type; //assert(newEdge.data.type >= 0); } newEdge.data.forward = data.forward; newEdge.data.backward = data.backward; edges.push_back(newEdge); } } } private: bool _ConstructCH(_DynamicGraph * _graph); void _Dijkstra(NodeID source, const int maxDistance, const unsigned numTargets, _ThreadData * data) { _Heap & heap = data->heap; unsigned nodes = 0; while (heap.Size() > 0) { const NodeID node = heap.DeleteMin(); const int distance = heap.GetKey(node); if (nodes++ > numTargets) return; //Destination settled ? 
if (distance > maxDistance) return; //iterate over all edges of node for (_DynamicGraph: : EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge != endEdges; ++edge) { const _EdgeData & data = _graph->GetEdgeData(edge); if (!data.forward) continue; const NodeID to = _graph->GetTarget(edge); const int toDistance = distance + data.distance; //New Node discovered->Add to Heap + Node Info Storage if (!heap.WasInserted(to)) heap.Insert(to, toDistance, _HeapData()); //Found a shorter Path->Update distance else if (toDistance < heap.GetKey(to)) { heap.DecreaseKey(to, toDistance); //heap.GetData(to).hops = hops + 1; } } } } double _Evaluate(_ThreadData * data, _PriorityData * nodeData, NodeID node) { _ContractionInformation stats; //perform simulated contraction _Contract < true > (data, node, &stats); //Result will contain the priority if (stats.edgesDeleted == 0 || stats.originalEdgesDeleted == 0) return depthFactor * nodeData->depth; return edgeQuotionFactor * (((double)stats.edgesAdded) / stats.edgesDeleted) + originalQuotientFactor * (((double)stats.originalEdgesAdded) / stats.originalEdgesDeleted) + depthFactor * nodeData->depth; } template < class Edge > bool _CheckCH() { NodeID numberOfNodes = _graph->GetNumberOfNodes(); for (NodeID node = 0; node < numberOfNodes; ++node) { for (_DynamicGraph: :EdgeIterator edge = _graph->BeginEdges(node), endEdges = _graph->EndEdges(node); edge != endEdges; ++edge) { const NodeID start = node; const NodeID target = _graph->GetTarget(edge); const _EdgeData & data = _graph->GetEdgeData(edge); const NodeID middle = data.middleName.middle; assert(start != target); if (data.shortcut) { if (_graph->FindEdge(start, middle) == SPECIAL_EDGEID && _graph->FindEdge(middle, start) == SPECIAL_EDGEID) { assert(false); return false; } if (_graph->FindEdge(middle, target) == SPECIAL_EDGEID && _graph->FindEdge(target, middle) == SPECIAL_EDGEID) { assert(false); return false; } } } } return true; } template < bool 
Simulate > bool _Contract(_ThreadData * data, NodeID node, _ContractionInformation * stats = NULL) { _Heap & heap = data->heap; for (_DynamicGraph: :EdgeIterator inEdge = _graph->BeginEdges(node), endInEdges = _graph->EndEdges(node); inEdge != endInEdges; ++inEdge) { const _EdgeData & inData = _graph->GetEdgeData(inEdge); const NodeID source = _graph->GetTarget(inEdge); if (Simulate) { assert(stats != NULL); unsigned factor = (inData.forward && inData.backward ? 2 : 1); stats->edgesDeleted += factor; stats->originalEdgesDeleted += factor * inData.originalEdges; } if (!inData.backward) continue; heap.Clear(); heap.Insert(source, 0, _HeapData()); if (node != source) heap.Insert(node, inData.distance, _HeapData()); int maxDistance = 0; //unsigned numTargets = 0; for (_DynamicGraph: :EdgeIterator outEdge = _graph->BeginEdges(node), endOutEdges = _graph->EndEdges(node); outEdge != endOutEdges; ++outEdge) { const _EdgeData & outData = _graph->GetEdgeData(outEdge); if (!outData.forward) continue; const NodeID target = _graph->GetTarget(outEdge); const int pathDistance = inData.distance + outData.distance; maxDistance = std: :max(maxDistance, pathDistance); if (!heap.WasInserted(target)) heap.Insert(target, pathDistance, _HeapData(true)); else if (pathDistance < heap.GetKey(target)) heap.DecreaseKey(target, pathDistance); } if (Simulate) _Dijkstra(source, maxDistance, 500, data); else _Dijkstra(source, maxDistance, 1000, data); for (_DynamicGraph: :EdgeIterator outEdge = _graph->BeginEdges(node), endOutEdges = _graph->EndEdges(node); outEdge != endOutEdges; ++outEdge) { const _EdgeData & outData = _graph->GetEdgeData(outEdge); if (!outData.forward) continue; const NodeID target = _graph->GetTarget(outEdge); const int pathDistance = inData.distance + outData.distance; const int distance = heap.GetKey(target); if (pathDistance <= distance) { if (Simulate) { assert(stats != NULL); stats->edgesAdded++; stats->originalEdgesAdded += (outData.originalEdges + 
inData.originalEdges); } else { _ImportEdge newEdge; newEdge.source = source; newEdge.target = target; newEdge.data.distance = pathDistance; newEdge.data.forward = true; newEdge.data.backward = false; newEdge.data.middleName.middle = node; newEdge.data.shortcut = true; newEdge.data.originalEdges = outData.originalEdges + inData.originalEdges; data->insertedEdges.push_back(newEdge); std: : swap(newEdge.source, newEdge.target); newEdge.data.forward = false; newEdge.data.backward = true; data->insertedEdges.push_back(newEdge); } } } } return true; } bool _DeleteIncomingEdges(_ThreadData * data, NodeID node) { std::vector < NodeID > neighbours; //find all neighbours for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID u = _graph->GetTarget(e); if (u == node) continue; neighbours.push_back(u); } //eliminate duplicate entries(forward + backward edges) std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); for (int i = 0, e = (int)neighbours.size(); i < e; ++i) { const NodeID u = neighbours[i]; _graph->DeleteEdgesTo(u, node); } return true; } bool _UpdateNeighbours(std::vector < double >*priorities, std::vector < _PriorityData > *nodeData, _ThreadData * data, NodeID node) { std::vector < NodeID > neighbours; //find all neighbours for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID u = _graph->GetTarget(e); if (u == node) continue; neighbours.push_back(u); (*nodeData)[u].depth = std: : max((*nodeData)[node].depth + 1, (*nodeData)[u].depth); } //eliminate duplicate entries(forward + backward edges) std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); for (int i = 0, e = (int)neighbours.size(); i < e; ++i) { const NodeID u = neighbours[i]; (*priorities)[u] = _Evaluate(data, 
&(*nodeData)[u], u); } return true; } bool _IsIndependent(const _DynamicGraph * _graph, const std::vector < double >&priorities, const std::vector < _PriorityData > &nodeData, NodeID node) { const double priority = priorities[node]; std: : vector < NodeID > neighbours; for (_DynamicGraph: : EdgeIterator e = _graph->BeginEdges(node); e < _graph->EndEdges(node); ++e) { const NodeID target = _graph->GetTarget(e); const double targetPriority = priorities[target]; assert(targetPriority >= 0); //found a neighbour with lower priority ? if (priority > targetPriority) return false; //tie breaking if (priority == targetPriority && nodeData[node].bias < nodeData[target].bias) return false; neighbours.push_back(target); } std: : sort(neighbours.begin(), neighbours.end()); neighbours.resize(std: :unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); //examine all neighbours that are at most 2 hops away for (std: : vector < NodeID >: :const_iterator i = neighbours.begin(), lastNode = neighbours.end(); i != lastNode; ++i) { const NodeID u = *i; for (_DynamicGraph: :EdgeIterator e = _graph->BeginEdges(u); e < _graph->EndEdges(u); ++e) { const NodeID target = _graph->GetTarget(e); const double targetPriority = priorities[target]; assert(targetPriority >= 0); //found a neighbour with lower priority ? if (priority > targetPriority) return false; //tie breaking if (priority == targetPriority && nodeData[node].bias < nodeData[target].bias) return false; } } return true; } _DynamicGraph *_graph; std: :vector < NodeID > *_components; unsigned edgeQuotionFactor; unsigned originalQuotientFactor; unsigned depthFactor; }; #endif /* // CONTRACTOR_H_INCLUDED */
GB_binop__isle_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8) // A*D function (colscale): GB (_AxD__isle_uint8) // D*A function (rowscale): GB (_DxB__isle_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8) // C=scalar+B GB (_bind1st__isle_uint8) // C=scalar+B' GB (_bind1st_tran__isle_uint8) // C=A+scalar GB (_bind2nd__isle_uint8) // C=A'+scalar GB (_bind2nd_tran__isle_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8) // A*D function (colscale): GB (_AxD__isle_uint8) // D*A function (rowscale): GB (_DxB__isle_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8) // C=scalar+B GB (_bind1st__isle_uint8) // C=scalar+B' GB (_bind1st_tran__isle_uint8) // C=A+scalar GB (_bind2nd__isle_uint8) // C=A'+scalar GB (_bind2nd_tran__isle_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; 
uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8) // A*D function (colscale): GB (_AxD__isle_uint8) // D*A function (rowscale): GB (_DxB__isle_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8) // C=scalar+B GB (_bind1st__isle_uint8) // C=scalar+B' GB (_bind1st_tran__isle_uint8) // C=A+scalar GB (_bind2nd__isle_uint8) // C=A'+scalar GB (_bind2nd_tran__isle_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernels for the ISLE
// ("is less than or equal") operator on uint8: z = (x <= y).  Generated code:
// do not edit by hand; regenerate instead.  The reformatting below only
// restores one statement / one preprocessor directive per line (directives
// are line-based, so the collapsed form was not compilable); no code token
// is changed.

// Tail of a scale kernel whose signature begins before this chunk
// (the colscale template below suggests C = A*D — confirm in the full file).
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx holds the uint8 result values; the pattern comes from the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx holds the uint8 result values; the loop lives in the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint8)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isle_uint8)
(
    GrB_Matrix C, const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    // entries are independent: Cx [p] = (x <= Bx [p]) for each present entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (Bb NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    // entries are independent: Cx [p] = (Ax [p] <= y) for each present entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (same value here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_int32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): reformatted only — one directive/statement per line; no code
// token changed.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_uint64
// op(A') function:  GB_tran__abs_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

// input entry type
#define GB_ATYPE \
    uint64_t

// output entry type
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij)): note the cast to int32_t happens BEFORE GB_IABS
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int32_uint64
(
    int32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each of the anz entries is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_uint64 // op(A') function: GB_tran__abs_int32_uint64 // C type: int32_t // A type: uint64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_uint64 ( int32_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this copy is the OpenMP variant (parallel apply loop below).
// Reformatted only; no code token changed.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_uint64
// op(A') function:  GB_tran__abs_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

// input entry type
#define GB_ATYPE \
    uint64_t

// output entry type
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij)): note the cast to int32_t happens BEFORE GB_IABS
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int32_uint64
(
    int32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each of the anz entries is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fast_math.c
/* Generated by Cython 0.29.12 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "/openmp" ], "name": "quantas.utils.math.fast_math", "sources": [ "quantas/utils/math/fast_math.pyx" ] }, "module_name": "quantas.utils.math.fast_math" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_12" #define CYTHON_HEX_VERSION 0x001D0CF0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef 
CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define 
CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include 
"longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH 
[[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef 
METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && 
!defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef 
PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__quantas__utils__math__fast_math #define __PYX_HAVE_API__quantas__utils__math__fast_math /* Early includes */ #include "math.h" #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> 
/* NOTE(review): Cython-generated support code (emitted for
   quantas/utils/math/fast_math.pyx).  Do not hand-edit this file; change the
   .pyx source and regenerate with Cython instead.  Comments below were added
   during review only — all code tokens are unchanged. */
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
/* One entry of the module's interned-string table: *p receives the created
   Python string object; s/n are the C bytes and length; the flag chars select
   how the object is built (unicode vs bytes, str-compat, interned). */
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
/* Default c_string_encoding configuration baked in at cythonize time
   (here: neither ascii nor utf8 was requested). */
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
/* True when value v of integral `type` is representable as Py_ssize_t
   (compile-time width comparison plus runtime range check). */
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
    (sizeof(type) < sizeof(Py_ssize_t)) ||\
    (sizeof(type) > sizeof(Py_ssize_t) &&\
          likely(v < (type)PY_SSIZE_T_MAX ||\
                 v == (type)PY_SSIZE_T_MAX)  &&\
          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
                                v == (type)PY_SSIZE_T_MIN)))  ||\
    (sizeof(type) == sizeof(Py_ssize_t) &&\
          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                               v == (type)PY_SSIZE_T_MAX)))  )
/* Branch-free bounds check: 0 <= i < limit.  A negative i wraps to a huge
   size_t, so a single unsigned compare covers both ends. */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    return (size_t) i < (size_t) limit;
}
/* Pick an absolute-value implementation wide enough for Py_ssize_t on this
   platform/compiler. */
#if defined (__cplusplus) && __cplusplus >= 201103L
    #include <cstdlib>
    #define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
    #define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
/* Object <-> C string conversion helpers (definitions appear later in the
   generated file). */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
/* "str" maps to bytes on Py2 and unicode on Py3. */
#if PY_MAJOR_VERSION < 3
    #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
    #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
/* strlen() for NUL-terminated Py_UNICODE buffers (counts code units). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    const Py_UNICODE *u_end = u;
    while (*u_end++) ;
    return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
/* Returns its argument with an extra strong reference (comma expression). */
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
/* Fast-path float extraction when the CPython macro API is considered safe. */
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
/* Py2 + c_string_encoding=ascii only: verify at module init that the
   interpreter's default encoding is a superset of ASCII, caching the result
   in __Pyx_sys_getdefaultencoding_not_ascii.  Returns 0 on success, -1 with a
   Python exception set on failure. */
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        /* Round-trip all 128 ASCII bytes through the default encoding to
           prove it is ASCII-compatible. */
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
/* Alternate variant: cache a malloc'd copy of sys.getdefaultencoding() in
   __PYX_DEFAULT_STRING_ENCODING for later decode calls.  The buffer is not
   freed anywhere in view (presumably it lives for the whole process). */
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
/* Branch-prediction hints; compile away to plain expressions elsewhere. */
#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
/* Silences "may be used uninitialized" warnings without generating code. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
/* Module-level state, populated during module initialization. */
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
/* Current source position for error reporting (see __PYX_ERR). */
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Filename table indexed by the f_index argument of __PYX_ERR. */
static const char *__pyx_f[] = {
  "quantas\\utils\\math\\fast_math.pyx",
  "stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
/* C-level view of a typed memoryview slice: owning memoryview object, raw
   data pointer, and per-dimension geometry (up to 8 dimensions). */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])
/* Atomics.proto */
/* NOTE(review): Cython-generated support declarations (memoryview atomics,
   buffer-format metadata, refnanny debugging macros, utility prototypes).
   Do not hand-edit; regenerate from the .pyx source.  Review comments only —
   code tokens unchanged. */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* Select an atomic increment/decrement for memoryview acquisition counts;
   the MSVC and Intel branches are deliberately disabled ("&& 0") and the
   final fallback uses the lock-based *_locked helpers instead. */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Runtime description of a buffer element type, used when parsing
   PEP 3118 buffer format strings. */
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
/* Parser state while walking a buffer format string. */
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab;
  char *data;
  Py_ssize_t len;
  char *format;
  int ndim;
  Py_ssize_t *_shape;
  Py_ssize_t *_strides;
  Py_ssize_t itemsize;
  PyObject *mode;
  PyObject *_format;
  void (*callback_free_data)(void *);
  int free_data;
  int dtype_is_object;
};
/* "View.MemoryView":279
 *
 * @cname('__pyx_MemviewEnum')
 * cdef class Enum(object):             # <<<<<<<<<<<<<<
 *     cdef object name
 *     def __init__(self, name):
 */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;
};
/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab;
  PyObject *obj;
  PyObject *_size;
  PyObject *_array_interface;
  PyThread_type_lock lock;
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;
  Py_buffer view;
  int flags;
  int dtype_is_object;
  __Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":961
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;
  __Pyx_memviewslice from_slice;
  PyObject *from_object;
  PyObject *(*to_object_func)(char *);
  int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":961
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
/* Optional reference-count debugging hooks; all macros are no-ops (plain
   Py_INCREF/Py_DECREF) unless compiled with CYTHON_REFNANNY. */
#ifndef CYTHON_REFNANNY
  #define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
  typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
  } __Pyx_RefNannyAPIStruct;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          if (acquire_gil) {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
              PyGILState_Release(__pyx_gilstate_save);\
          } else {\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
          }
#else
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
  #define __Pyx_RefNannyFinishContext()\
          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
  #define __Pyx_RefNannyDeclarations
  #define __Pyx_RefNannySetupContext(name, acquire_gil)
  #define __Pyx_RefNannyFinishContext()
  #define __Pyx_INCREF(r) Py_INCREF(r)
  #define __Pyx_DECREF(r) Py_DECREF(r)
  #define __Pyx_GOTREF(r)
  #define __Pyx_GIVEREF(r)
  #define __Pyx_XINCREF(r) Py_XINCREF(r)
  #define __Pyx_XDECREF(r) Py_XDECREF(r)
  #define __Pyx_XGOTREF(r)
  #define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_XDECREF(tmp);\
    } while (0)
#define __Pyx_DECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_DECREF(tmp);\
    } while (0)
#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
    const char* function_name);
/* PyDictVersioning.proto */
/* Caches module-dict lookups keyed on CPython's ma_version_tag, when
   available. */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
    (version_var) = __PYX_GET_DICT_VERSION(dict);\
    (cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
        (VAR) = __pyx_dict_cached_value;\
    } else {\
        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
    }\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict)  (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name)  {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name)  {\
    PY_UINT64_T __pyx_dict_version;\
    PyObject *__pyx_dict_cached_value;\
    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* MemviewSliceInit.proto */
/* NOTE(review): "%(BUF_MAX_NDIMS)d" is an unexpanded Cython template
   placeholder retained verbatim from the generator; the macro is presumably
   unused in this module — confirm before relying on it. */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR    2
#define __Pyx_MEMVIEW_FULL   4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
/* Compile-time assertion usable inside an expression. */
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
    (sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
  static size_t __pyx_pyframe_localsplus_offset = 0;
  #include "frameobject.h"
  #define __Pxy_PyFrame_Initialize_Offsets()\
    ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
     (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
  #define __Pyx_PyFrame_GetLocalsplus(frame)\
    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred()  __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred()  PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
    ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
        __Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
/* True when negating x would overflow (x == the type's minimum value). */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key)  PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
/* Thin wrappers over PyUnicode_DecodeUTF16; per the CPython API, byteorder
   0 = auto-detect via BOM, -1 = little-endian, +1 = big-endian. */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = 0;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = -1;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = 1;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
/*
 * NOTE(review): Auto-generated Cython utility-code section (exception
 * save/reset/swap macros, fast list helpers, and forward prototypes for the
 * memoryview machinery). Do not hand-edit: regenerate from the corresponding
 * .pyx source (module name visible below: 'quantas.utils.math.fast_math').
 *
 * Contents of this section, in order:
 *  - __Pyx_ExceptionSave/Reset/Swap and __Pyx_GetException: thin wrappers
 *    that use the per-thread exception state directly when
 *    CYTHON_FAST_THREAD_STATE is set, else fall back to the public
 *    PyErr_GetExcInfo/PyErr_SetExcInfo API.
 *  - __Pyx_ListComp_Append / __Pyx_PyList_Append: append into a PyList by
 *    writing into its preallocated slots (L->allocated) when the internals
 *    are accessible, avoiding a PyList_Append call; otherwise they are plain
 *    PyList_Append. Both return 0 on success, -1 on failure (from
 *    PyList_Append).
 *  - __Pyx_PyList_Extend: uses the private _PyList_Extend on CPython, else
 *    PyList_SetSlice at PY_SSIZE_T_MAX (i.e. append slice at the end);
 *    returns 0 on success, -1 on error.
 *  - Remaining entries are prototypes only (traceback cache, buffer-format
 *    checking, memoryview slice validation, C-int <-> PyObject converters);
 *    their definitions appear later in the generated file.
 *
 * NOTE(review): some physical lines here end inside a macro definition
 * (e.g. the backslash-continued __Pyx_PyInt_AddObjC); line boundaries in
 * this file are not statement boundaries.
 */
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) 
PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int 
__Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* 
__Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj 
*__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'quantas.utils.math.fast_math' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_7quantas_5utils_4math_9fast_math_sInterp(double, double, double); /*proto*/ static 
PyObject *__pyx_f_7quantas_5utils_4math_9fast_math_vector(double, double, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_4math_9fast_math_cofactor(__Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ 
/*
 * NOTE(review): Auto-generated Cython section. Do not hand-edit; regenerate
 * from the .pyx source. Contains: forward prototypes for the memoryview
 * copy/refcount helpers (defined later in the generated file), the
 * __Pyx_TypeInfo descriptor for the C 'double' element type used by this
 * module's typed memoryviews, the module-name macro, and the
 * "is this module __main__" flag plus cached-builtin slots for
 * 'quantas.utils.math.fast_math'.
 */
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "quantas.utils.math.fast_math" extern int __pyx_module_is_main_quantas__utils__math__fast_math; int __pyx_module_is_main_quantas__utils__math__fast_math = 0; /* Implementation of 'quantas.utils.math.fast_math' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; 
static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_X[] = "X"; static const char __pyx_k_Y[] = "Y"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_k[] = "k"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_R2[] = "R2"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_nc[] = "nc"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_nv[] = "nv"; static const char __pyx_k_nx[] = "nx"; static const char __pyx_k_ny[] = "ny"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_phi[] = "phi"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ybar[] = "ybar"; static const char __pyx_k_yhat[] = "yhat"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_ssreg[] = "ssreg"; static const char __pyx_k_sstot[] = "sstot"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_theta[] = "theta"; static const char 
__pyx_k_zeros[] = "zeros"; static const char __pyx_k_Y_view[] = "Y_view"; static const char __pyx_k_coeffs[] = "coeffs"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_result[] = "result"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_R2_view[] = "R2_view"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_ndarray[] = "ndarray"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_multi_R2[] = "multi_R2"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_ybar_view[] = "ybar_view"; static const char __pyx_k_yhat_view[] = "yhat_view"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ssreg_view[] = "ssreg_view"; static const char __pyx_k_sstot_view[] = "sstot_view"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_result_view[] = "result_view"; static 
const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_multi_interpolate[] = "multi_interpolate"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_multi_interpolate_array[] = "multi_interpolate_array"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_multi_interpolate_scalar[] = "multi_interpolate_scalar"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_quantas_utils_math_fast_math[] = "quantas.utils.math.fast_math"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char 
__pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_quantas_utils_math_fast_math_pyx[] = "quantas\\utils\\math\\fast_math.pyx"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject 
*__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_R2; static PyObject *__pyx_n_s_R2_view; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_X; static PyObject *__pyx_n_s_Y; static PyObject *__pyx_n_s_Y_view; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_coeffs; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static 
PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_multi_R2; static PyObject *__pyx_n_s_multi_interpolate; static PyObject *__pyx_n_s_multi_interpolate_array; static PyObject *__pyx_n_s_multi_interpolate_scalar; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_nc; static PyObject *__pyx_n_s_ndarray; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nv; static PyObject *__pyx_n_s_nx; static PyObject *__pyx_n_s_ny; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_phi; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_quantas_utils_math_fast_math; static PyObject *__pyx_kp_s_quantas_utils_math_fast_math_pyx; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_result; static PyObject *__pyx_n_s_result_view; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_ssreg; static PyObject *__pyx_n_s_ssreg_view; static PyObject *__pyx_n_s_sstot; static PyObject *__pyx_n_s_sstot_view; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject 
*__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_theta; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_ybar; static PyObject *__pyx_n_s_ybar_view; static PyObject *__pyx_n_s_yhat; static PyObject *__pyx_n_s_yhat_view; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_multi_interpolate_array(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_coeffs); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_2multi_interpolate_scalar(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x, __Pyx_memviewslice __pyx_v_coeffs); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_4multi_interpolate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_coeffs); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_6multi_R2(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, __Pyx_memviewslice __pyx_v_coeffs); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_8vector(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_theta, double __pyx_v_phi); /* proto */ static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_10cofactor(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj 
*__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static 
Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_3; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__18; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__33; static PyObject *__pyx_tuple__34; static PyObject *__pyx_tuple__35; static PyObject *__pyx_codeobj__23; static PyObject *__pyx_codeobj__25; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__29; static PyObject *__pyx_codeobj__36; /* Late includes */ /* 
"quantas/utils/math/fast_math.pyx":22 * double cos(double x) * * cdef double sInterp(double x, double n, double coeff) nogil: # <<<<<<<<<<<<<< * return coeff * pow(x, n) * */ static double __pyx_f_7quantas_5utils_4math_9fast_math_sInterp(double __pyx_v_x, double __pyx_v_n, double __pyx_v_coeff) { double __pyx_r; /* "quantas/utils/math/fast_math.pyx":23 * * cdef double sInterp(double x, double n, double coeff) nogil: * return coeff * pow(x, n) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __pyx_r = (__pyx_v_coeff * pow(__pyx_v_x, __pyx_v_n)); goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":22 * double cos(double x) * * cdef double sInterp(double x, double n, double coeff) nogil: # <<<<<<<<<<<<<< * return coeff * pow(x, n) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":27 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_interpolate_array(double[::1] X, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_1multi_interpolate_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_7quantas_5utils_4math_9fast_math_1multi_interpolate_array = {"multi_interpolate_array", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_4math_9fast_math_1multi_interpolate_array, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_1multi_interpolate_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_coeffs = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("multi_interpolate_array (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_X,&__pyx_n_s_coeffs,0}; PyObject* 
values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_coeffs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("multi_interpolate_array", 1, 2, 2, 1); __PYX_ERR(0, 27, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multi_interpolate_array") < 0)) __PYX_ERR(0, 27, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 27, __pyx_L3_error) __pyx_v_coeffs = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_coeffs.memview)) __PYX_ERR(0, 27, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("multi_interpolate_array", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 27, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_multi_interpolate_array(__pyx_self, __pyx_v_X, __pyx_v_coeffs); /* function exit 
code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_multi_interpolate_array(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_coeffs) { Py_ssize_t __pyx_v_nv; Py_ssize_t __pyx_v_nc; Py_ssize_t __pyx_v_nx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_j; Py_ssize_t __pyx_v_k; PyObject *__pyx_v_result = NULL; __Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; __Pyx_RefNannySetupContext("multi_interpolate_array", 0); /* "quantas/utils/math/fast_math.pyx":28 * @cython.wraparound(False) * def multi_interpolate_array(double[::1] X, double[:,::1] coeffs): * cdef Py_ssize_t nv = coeffs.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t nc = coeffs.shape[1] * cdef Py_ssize_t nx = X.shape[0] */ __pyx_v_nv = (__pyx_v_coeffs.shape[0]); /* "quantas/utils/math/fast_math.pyx":29 * def multi_interpolate_array(double[::1] X, double[:,::1] coeffs): * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] # <<<<<<<<<<<<<< * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t i, j, k */ __pyx_v_nc = (__pyx_v_coeffs.shape[1]); /* "quantas/utils/math/fast_math.pyx":30 * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] * cdef Py_ssize_t nx = X.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t i, j, k * */ __pyx_v_nx = (__pyx_v_X.shape[0]); /* 
"quantas/utils/math/fast_math.pyx":33 * cdef Py_ssize_t i, j, k * * result = np.zeros( (nv,nx), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] result_view = result * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_result = __pyx_t_5; __pyx_t_5 = 0; /* "quantas/utils/math/fast_math.pyx":34 * * result = np.zeros( (nv,nx), dtype=np.float64 ) * cdef double[:,::1] result_view = result # <<<<<<<<<<<<<< * * for i in prange(nv, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 34, __pyx_L1_error) __pyx_v_result_view = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/math/fast_math.pyx":36 * cdef double[:,::1] result_view = result * * for i in prange(nv, nogil=True): # <<<<<<<<<<<<<< * for j in range(nc): * for k in range(nx): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_nv; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((Py_ssize_t)0xbad0bad0); __pyx_v_k = ((Py_ssize_t)0xbad0bad0); /* "quantas/utils/math/fast_math.pyx":37 * * for i in prange(nv, nogil=True): * for j in range(nc): # <<<<<<<<<<<<<< * for k in range(nx): * result_view[i, k] += sInterp( */ __pyx_t_10 = __pyx_v_nc; 
__pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/math/fast_math.pyx":38 * for i in prange(nv, nogil=True): * for j in range(nc): * for k in range(nx): # <<<<<<<<<<<<<< * result_view[i, k] += sInterp( * X[k], j, coeffs[i,j]) */ __pyx_t_13 = __pyx_v_nx; __pyx_t_14 = __pyx_t_13; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { __pyx_v_k = __pyx_t_15; /* "quantas/utils/math/fast_math.pyx":40 * for k in range(nx): * result_view[i, k] += sInterp( * X[k], j, coeffs[i,j]) # <<<<<<<<<<<<<< * * return result */ __pyx_t_16 = __pyx_v_k; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; /* "quantas/utils/math/fast_math.pyx":39 * for j in range(nc): * for k in range(nx): * result_view[i, k] += sInterp( # <<<<<<<<<<<<<< * X[k], j, coeffs[i,j]) * */ __pyx_t_19 = __pyx_v_i; __pyx_t_20 = __pyx_v_k; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_19 * __pyx_v_result_view.strides[0]) )) + __pyx_t_20)) )) += __pyx_f_7quantas_5utils_4math_9fast_math_sInterp((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_X.data) + __pyx_t_16)) ))), __pyx_v_j, (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_coeffs.data + __pyx_t_17 * __pyx_v_coeffs.strides[0]) )) + __pyx_t_18)) )))); } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/math/fast_math.pyx":36 * cdef double[:,::1] result_view = result * * for i in prange(nv, nogil=True): # <<<<<<<<<<<<<< * for j in range(nc): * for k in range(nx): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/math/fast_math.pyx":42 * X[k], j, 
coeffs[i,j]) * * return result # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":27 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_interpolate_array(double[::1] X, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_coeffs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":46 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_interpolate_scalar(double x, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_3multi_interpolate_scalar(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_7quantas_5utils_4math_9fast_math_3multi_interpolate_scalar = {"multi_interpolate_scalar", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_4math_9fast_math_3multi_interpolate_scalar, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_3multi_interpolate_scalar(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_x; __Pyx_memviewslice __pyx_v_coeffs = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("multi_interpolate_scalar (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_coeffs,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_coeffs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("multi_interpolate_scalar", 1, 2, 2, 1); __PYX_ERR(0, 46, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multi_interpolate_scalar") < 0)) __PYX_ERR(0, 46, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_x = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L3_error) __pyx_v_coeffs = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_coeffs.memview)) __PYX_ERR(0, 46, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("multi_interpolate_scalar", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 46, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); 
return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_2multi_interpolate_scalar(__pyx_self, __pyx_v_x, __pyx_v_coeffs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_2multi_interpolate_scalar(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x, __Pyx_memviewslice __pyx_v_coeffs) { Py_ssize_t __pyx_v_nv; Py_ssize_t __pyx_v_nc; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_j; PyObject *__pyx_v_result = NULL; __Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; __Pyx_RefNannySetupContext("multi_interpolate_scalar", 0); /* "quantas/utils/math/fast_math.pyx":47 * @cython.wraparound(False) * def multi_interpolate_scalar(double x, double[:,::1] coeffs): * cdef Py_ssize_t nv = coeffs.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t nc = coeffs.shape[1] * cdef Py_ssize_t i, j */ __pyx_v_nv = (__pyx_v_coeffs.shape[0]); /* "quantas/utils/math/fast_math.pyx":48 * def multi_interpolate_scalar(double x, double[:,::1] coeffs): * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] # <<<<<<<<<<<<<< * cdef Py_ssize_t i, j * */ __pyx_v_nc = (__pyx_v_coeffs.shape[1]); /* "quantas/utils/math/fast_math.pyx":51 * cdef Py_ssize_t i, j * * result = np.zeros( nv, dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[::1] result_view = result * */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_result = __pyx_t_5; __pyx_t_5 = 0; /* "quantas/utils/math/fast_math.pyx":52 * * result = np.zeros( nv, dtype=np.float64 ) * cdef double[::1] result_view = result # <<<<<<<<<<<<<< * * for i in prange(nv, nogil=True): */ __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 52, __pyx_L1_error) __pyx_v_result_view = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "quantas/utils/math/fast_math.pyx":54 * cdef double[::1] result_view = result * * for i in prange(nv, nogil=True): # <<<<<<<<<<<<<< * for j in range(nc): * 
result_view[i] += sInterp(x, j, coeffs[i,j]) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_nv; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ { __pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_j = ((Py_ssize_t)0xbad0bad0); /* "quantas/utils/math/fast_math.pyx":55 * * for i in prange(nv, nogil=True): * for j in range(nc): # <<<<<<<<<<<<<< * result_view[i] += sInterp(x, j, coeffs[i,j]) * */ __pyx_t_10 = __pyx_v_nc; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "quantas/utils/math/fast_math.pyx":56 * for i in prange(nv, nogil=True): * for j in range(nc): * result_view[i] += sInterp(x, j, coeffs[i,j]) # <<<<<<<<<<<<<< * * return result */ __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_result_view.data) + __pyx_t_15)) )) += __pyx_f_7quantas_5utils_4math_9fast_math_sInterp(__pyx_v_x, __pyx_v_j, (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_coeffs.data + __pyx_t_13 * __pyx_v_coeffs.strides[0]) )) + __pyx_t_14)) )))); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely 
#define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/math/fast_math.pyx":54 * cdef double[::1] result_view = result * * for i in prange(nv, nogil=True): # <<<<<<<<<<<<<< * for j in range(nc): * result_view[i] += sInterp(x, j, coeffs[i,j]) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "quantas/utils/math/fast_math.pyx":58 * result_view[i] += sInterp(x, j, coeffs[i,j]) * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":46 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_interpolate_scalar(double x, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_coeffs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":61 * * * def multi_interpolate(X, coeffs): # <<<<<<<<<<<<<< * if type(X) != np.ndarray: * return multi_interpolate_scalar(X, coeffs) */ /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_5multi_interpolate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_7quantas_5utils_4math_9fast_math_5multi_interpolate = {"multi_interpolate", 
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_4math_9fast_math_5multi_interpolate, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_5multi_interpolate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_coeffs = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("multi_interpolate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_X,&__pyx_n_s_coeffs,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_coeffs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("multi_interpolate", 1, 2, 2, 1); __PYX_ERR(0, 61, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multi_interpolate") < 0)) __PYX_ERR(0, 61, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_coeffs = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("multi_interpolate", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 61, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_4multi_interpolate(__pyx_self, __pyx_v_X, __pyx_v_coeffs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_4multi_interpolate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_coeffs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("multi_interpolate", 0); /* "quantas/utils/math/fast_math.pyx":62 * * def multi_interpolate(X, coeffs): * if type(X) != np.ndarray: # <<<<<<<<<<<<<< * return multi_interpolate_scalar(X, coeffs) * else: */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_RichCompare(((PyObject *)Py_TYPE(__pyx_v_X)), __pyx_t_2, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { /* "quantas/utils/math/fast_math.pyx":63 * def multi_interpolate(X, coeffs): * if type(X) != np.ndarray: * return multi_interpolate_scalar(X, coeffs) # <<<<<<<<<<<<<< * else: * return multi_interpolate_array(X, coeffs) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_multi_interpolate_scalar); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_X, __pyx_v_coeffs}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_X, __pyx_v_coeffs}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_X); __Pyx_INCREF(__pyx_v_coeffs); __Pyx_GIVEREF(__pyx_v_coeffs); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_coeffs); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":62 * * def multi_interpolate(X, coeffs): * if type(X) != np.ndarray: # <<<<<<<<<<<<<< * return multi_interpolate_scalar(X, coeffs) * else: */ } /* 
"quantas/utils/math/fast_math.pyx":65 * return multi_interpolate_scalar(X, coeffs) * else: * return multi_interpolate_array(X, coeffs) # <<<<<<<<<<<<<< * * */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_multi_interpolate_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_X, __pyx_v_coeffs}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_X, __pyx_v_coeffs}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_4 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_5, __pyx_v_X); __Pyx_INCREF(__pyx_v_coeffs); __Pyx_GIVEREF(__pyx_v_coeffs); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_5, __pyx_v_coeffs); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; } /* "quantas/utils/math/fast_math.pyx":61 * * * def multi_interpolate(X, coeffs): # <<<<<<<<<<<<<< * if type(X) != np.ndarray: * return multi_interpolate_scalar(X, coeffs) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_interpolate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":70 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_R2(double[::1] X, double[:,::1] Y, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t ny = Y.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_7multi_R2(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_7quantas_5utils_4math_9fast_math_7multi_R2 = {"multi_R2", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_4math_9fast_math_7multi_R2, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_7multi_R2(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_coeffs = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("multi_R2 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_X,&__pyx_n_s_Y,&__pyx_n_s_coeffs,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: 
values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("multi_R2", 1, 3, 3, 1); __PYX_ERR(0, 70, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_coeffs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("multi_R2", 1, 3, 3, 2); __PYX_ERR(0, 70, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multi_R2") < 0)) __PYX_ERR(0, 70, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 70, __pyx_L3_error) __pyx_v_Y = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Y.memview)) __PYX_ERR(0, 70, __pyx_L3_error) __pyx_v_coeffs = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_coeffs.memview)) __PYX_ERR(0, 70, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("multi_R2", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 70, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("quantas.utils.math.fast_math.multi_R2", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_6multi_R2(__pyx_self, __pyx_v_X, __pyx_v_Y, __pyx_v_coeffs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_6multi_R2(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, __Pyx_memviewslice __pyx_v_coeffs) { Py_ssize_t __pyx_v_nx; Py_ssize_t __pyx_v_ny; CYTHON_UNUSED Py_ssize_t __pyx_v_nc; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_j; PyObject *__pyx_v_yhat = NULL; PyObject *__pyx_v_ybar = NULL; PyObject *__pyx_v_ssreg = NULL; PyObject *__pyx_v_sstot = NULL; PyObject *__pyx_v_R2 = NULL; __Pyx_memviewslice __pyx_v_ybar_view = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_ssreg_view = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_sstot_view = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_R2_view = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Y_view = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_yhat_view = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; double __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; 
Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; int __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; double __pyx_t_33; Py_ssize_t __pyx_t_34; __Pyx_RefNannySetupContext("multi_R2", 0); /* "quantas/utils/math/fast_math.pyx":71 * @cython.wraparound(False) * def multi_R2(double[::1] X, double[:,::1] Y, double[:,::1] coeffs): * cdef Py_ssize_t nx = X.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t ny = Y.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ __pyx_v_nx = (__pyx_v_X.shape[0]); /* "quantas/utils/math/fast_math.pyx":72 * def multi_R2(double[::1] X, double[:,::1] Y, double[:,::1] coeffs): * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t ny = Y.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t nc = coeffs.shape[1] * */ __pyx_v_ny = (__pyx_v_Y.shape[0]); /* "quantas/utils/math/fast_math.pyx":73 * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t ny = Y.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] # <<<<<<<<<<<<<< * * cdef Py_ssize_t i, j */ __pyx_v_nc = (__pyx_v_coeffs.shape[1]); /* "quantas/utils/math/fast_math.pyx":77 * cdef Py_ssize_t i, j * * yhat = multi_interpolate_array(X, coeffs) # <<<<<<<<<<<<<< * ybar = np.zeros( ny, dtype=np.float64 ) * ssreg = np.zeros( ny, dtype=np.float64 ) */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_multi_interpolate_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_X, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_coeffs, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && 
unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_yhat = __pyx_t_1; __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":78 * * yhat = multi_interpolate_array(X, coeffs) * ybar = np.zeros( ny, dtype=np.float64 ) # <<<<<<<<<<<<<< * ssreg = np.zeros( ny, 
dtype=np.float64 ) * sstot = np.zeros( ny, dtype=np.float64 ) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_ny); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_ybar = __pyx_t_3; __pyx_t_3 = 0; /* "quantas/utils/math/fast_math.pyx":79 * yhat = multi_interpolate_array(X, coeffs) * ybar = np.zeros( ny, dtype=np.float64 ) * ssreg = np.zeros( ny, dtype=np.float64 ) # <<<<<<<<<<<<<< * sstot = np.zeros( ny, dtype=np.float64 ) * R2 = np.zeros( ny, dtype=np.float64 ) */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_ssreg = __pyx_t_4; __pyx_t_4 = 0; /* "quantas/utils/math/fast_math.pyx":80 * ybar = np.zeros( ny, dtype=np.float64 ) * ssreg = np.zeros( ny, dtype=np.float64 ) * sstot = np.zeros( ny, dtype=np.float64 ) # <<<<<<<<<<<<<< * R2 = np.zeros( ny, dtype=np.float64 ) * */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_ny); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_sstot = __pyx_t_2; __pyx_t_2 = 0; /* "quantas/utils/math/fast_math.pyx":81 * ssreg = np.zeros( ny, dtype=np.float64 ) * sstot = np.zeros( ny, dtype=np.float64 ) * R2 = np.zeros( ny, dtype=np.float64 ) # <<<<<<<<<<<<<< * * cdef double[::1] ybar_view = ybar */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_ny); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 81, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_R2 = __pyx_t_1; __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":83 * R2 = np.zeros( ny, dtype=np.float64 ) * * cdef double[::1] ybar_view = ybar # <<<<<<<<<<<<<< * cdef double[::1] ssreg_view = ssreg * cdef double[::1] sstot_view = sstot */ __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_ybar, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 83, __pyx_L1_error) __pyx_v_ybar_view = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "quantas/utils/math/fast_math.pyx":84 * * cdef double[::1] ybar_view = ybar * cdef double[::1] ssreg_view = ssreg # <<<<<<<<<<<<<< * cdef double[::1] sstot_view = sstot * cdef double[::1] R2_view = R2 */ __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_ssreg, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 84, __pyx_L1_error) __pyx_v_ssreg_view = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "quantas/utils/math/fast_math.pyx":85 * cdef double[::1] ybar_view = ybar * cdef double[::1] 
ssreg_view = ssreg * cdef double[::1] sstot_view = sstot # <<<<<<<<<<<<<< * cdef double[::1] R2_view = R2 * cdef double[:,::1] Y_view = Y */ __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_sstot, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 85, __pyx_L1_error) __pyx_v_sstot_view = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "quantas/utils/math/fast_math.pyx":86 * cdef double[::1] ssreg_view = ssreg * cdef double[::1] sstot_view = sstot * cdef double[::1] R2_view = R2 # <<<<<<<<<<<<<< * cdef double[:,::1] Y_view = Y * cdef double[:,::1] yhat_view = yhat */ __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_R2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_v_R2_view = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "quantas/utils/math/fast_math.pyx":87 * cdef double[::1] sstot_view = sstot * cdef double[::1] R2_view = R2 * cdef double[:,::1] Y_view = Y # <<<<<<<<<<<<<< * cdef double[:,::1] yhat_view = yhat * */ __PYX_INC_MEMVIEW(&__pyx_v_Y, 0); __pyx_v_Y_view = __pyx_v_Y; /* "quantas/utils/math/fast_math.pyx":88 * cdef double[::1] R2_view = R2 * cdef double[:,::1] Y_view = Y * cdef double[:,::1] yhat_view = yhat # <<<<<<<<<<<<<< * * for i in prange(ny, nogil=True): */ __pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_yhat, PyBUF_WRITABLE); if (unlikely(!__pyx_t_9.memview)) __PYX_ERR(0, 88, __pyx_L1_error) __pyx_v_yhat_view = __pyx_t_9; __pyx_t_9.memview = NULL; __pyx_t_9.data = NULL; /* "quantas/utils/math/fast_math.pyx":90 * cdef double[:,::1] yhat_view = yhat * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * for j in range(nx): * ybar_view[i] += Y_view[i,j]/nx */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_10 = __pyx_v_ny; if (1 == 0) abort(); { Py_ssize_t __pyx_parallel_temp0 = ((Py_ssize_t)0xbad0bad0); Py_ssize_t __pyx_parallel_temp1 = 
((Py_ssize_t)0xbad0bad0); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_12; __pyx_t_11++){ if (__pyx_parallel_why < 2) { __pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_11); /* Initialize private variables to invalid values */ __pyx_v_j = ((Py_ssize_t)0xbad0bad0); /* "quantas/utils/math/fast_math.pyx":91 * * for i in prange(ny, nogil=True): * for j in range(nx): # <<<<<<<<<<<<<< * ybar_view[i] += Y_view[i,j]/nx * */ __pyx_t_13 = __pyx_v_nx; __pyx_t_14 = __pyx_t_13; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { __pyx_v_j = __pyx_t_15; /* "quantas/utils/math/fast_math.pyx":92 * for i in prange(ny, nogil=True): * for j in range(nx): * ybar_view[i] += Y_view[i,j]/nx # <<<<<<<<<<<<<< * * for i in prange(ny, nogil=True): */ __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_j; __pyx_t_18 = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ 
(__pyx_v_Y_view.data + __pyx_t_16 * __pyx_v_Y_view.strides[0]) )) + __pyx_t_17)) ))); if (unlikely(__pyx_v_nx == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 92, __pyx_L8_error) } __pyx_t_19 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ybar_view.data) + __pyx_t_19)) )) += (__pyx_t_18 / __pyx_v_nx); } goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_i; __pyx_parallel_temp1 = __pyx_v_j; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_i = __pyx_parallel_temp0; __pyx_v_j = __pyx_parallel_temp1; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/math/fast_math.pyx":90 * cdef double[:,::1] yhat_view = yhat * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * for j in range(nx): * ybar_view[i] += Y_view[i,j]/nx */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "quantas/utils/math/fast_math.pyx":94 * ybar_view[i] += Y_view[i,j]/nx * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * for j in range(nx): * ssreg_view[i] += pow(yhat_view[i,j]-ybar_view[i],2.) 
*/ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_12 = __pyx_v_ny; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_10 = (__pyx_t_12 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_10 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11++){ { __pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_11); /* Initialize private variables to invalid values */ __pyx_v_j = ((Py_ssize_t)0xbad0bad0); /* "quantas/utils/math/fast_math.pyx":95 * * for i in prange(ny, nogil=True): * for j in range(nx): # <<<<<<<<<<<<<< * ssreg_view[i] += pow(yhat_view[i,j]-ybar_view[i],2.) * sstot_view[i] += pow(Y_view[i,j]-ybar_view[i],2.) */ __pyx_t_13 = __pyx_v_nx; __pyx_t_14 = __pyx_t_13; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { __pyx_v_j = __pyx_t_15; /* "quantas/utils/math/fast_math.pyx":96 * for i in prange(ny, nogil=True): * for j in range(nx): * ssreg_view[i] += pow(yhat_view[i,j]-ybar_view[i],2.) # <<<<<<<<<<<<<< * sstot_view[i] += pow(Y_view[i,j]-ybar_view[i],2.) 
* */ __pyx_t_20 = __pyx_v_i; __pyx_t_21 = __pyx_v_j; __pyx_t_22 = __pyx_v_i; __pyx_t_23 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ssreg_view.data) + __pyx_t_23)) )) += pow(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_yhat_view.data + __pyx_t_20 * __pyx_v_yhat_view.strides[0]) )) + __pyx_t_21)) ))) - (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ybar_view.data) + __pyx_t_22)) )))), 2.); /* "quantas/utils/math/fast_math.pyx":97 * for j in range(nx): * ssreg_view[i] += pow(yhat_view[i,j]-ybar_view[i],2.) * sstot_view[i] += pow(Y_view[i,j]-ybar_view[i],2.) # <<<<<<<<<<<<<< * * for i in prange(ny, nogil=True): */ __pyx_t_24 = __pyx_v_i; __pyx_t_25 = __pyx_v_j; __pyx_t_26 = __pyx_v_i; __pyx_t_27 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sstot_view.data) + __pyx_t_27)) )) += pow(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Y_view.data + __pyx_t_24 * __pyx_v_Y_view.strides[0]) )) + __pyx_t_25)) ))) - (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ybar_view.data) + __pyx_t_26)) )))), 2.); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/math/fast_math.pyx":94 * ybar_view[i] += Y_view[i,j]/nx * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * for j in range(nx): * ssreg_view[i] += pow(yhat_view[i,j]-ybar_view[i],2.) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L16; } __pyx_L16:; } } /* "quantas/utils/math/fast_math.pyx":99 * sstot_view[i] += pow(Y_view[i,j]-ybar_view[i],2.) * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * if sstot_view[i] == 0.: * R2_view[i] = 0. 
*/ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_10 = __pyx_v_ny; if (1 == 0) abort(); { Py_ssize_t __pyx_parallel_temp0 = ((Py_ssize_t)0xbad0bad0); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_18, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_12; __pyx_t_11++){ if (__pyx_parallel_why < 2) { __pyx_v_i = (Py_ssize_t)(0 + 1 * __pyx_t_11); /* "quantas/utils/math/fast_math.pyx":100 * * for i in prange(ny, nogil=True): * if sstot_view[i] == 0.: # <<<<<<<<<<<<<< * R2_view[i] = 0. * else: */ __pyx_t_28 = __pyx_v_i; __pyx_t_29 = (((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sstot_view.data) + __pyx_t_28)) ))) == 0.) != 0); if (__pyx_t_29) { /* "quantas/utils/math/fast_math.pyx":101 * for i in prange(ny, nogil=True): * if sstot_view[i] == 0.: * R2_view[i] = 0. 
# <<<<<<<<<<<<<< * else: * R2_view[i] = ssreg_view[i]/sstot_view[i] */ __pyx_t_30 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_R2_view.data) + __pyx_t_30)) )) = 0.; /* "quantas/utils/math/fast_math.pyx":100 * * for i in prange(ny, nogil=True): * if sstot_view[i] == 0.: # <<<<<<<<<<<<<< * R2_view[i] = 0. * else: */ goto __pyx_L32; } /* "quantas/utils/math/fast_math.pyx":103 * R2_view[i] = 0. * else: * R2_view[i] = ssreg_view[i]/sstot_view[i] # <<<<<<<<<<<<<< * return R2 * */ /*else*/ { __pyx_t_31 = __pyx_v_i; __pyx_t_18 = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_ssreg_view.data) + __pyx_t_31)) ))); __pyx_t_32 = __pyx_v_i; __pyx_t_33 = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sstot_view.data) + __pyx_t_32)) ))); if (unlikely(__pyx_t_33 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 103, __pyx_L30_error) } __pyx_t_34 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_R2_view.data) + __pyx_t_34)) )) = (__pyx_t_18 / __pyx_t_33); } __pyx_L32:; goto __pyx_L34; __pyx_L30_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L33; __pyx_L33:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_i; } __pyx_L34:; 
#ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_i = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L26_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "quantas/utils/math/fast_math.pyx":99 * sstot_view[i] += pow(Y_view[i,j]-ybar_view[i],2.) * * for i in prange(ny, nogil=True): # <<<<<<<<<<<<<< * if sstot_view[i] == 0.: * R2_view[i] = 0. 
*/ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L27; } __pyx_L26_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L27:; } } /* "quantas/utils/math/fast_math.pyx":104 * else: * R2_view[i] = ssreg_view[i]/sstot_view[i] * return R2 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_R2); __pyx_r = __pyx_v_R2; goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":70 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_R2(double[::1] X, double[:,::1] Y, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t ny = Y.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_9, 1); __Pyx_AddTraceback("quantas.utils.math.fast_math.multi_R2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_yhat); __Pyx_XDECREF(__pyx_v_ybar); __Pyx_XDECREF(__pyx_v_ssreg); __Pyx_XDECREF(__pyx_v_sstot); __Pyx_XDECREF(__pyx_v_R2); __PYX_XDEC_MEMVIEW(&__pyx_v_ybar_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_ssreg_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_sstot_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_R2_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Y_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_yhat_view, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_coeffs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":109 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef vector(double theta, double phi): # <<<<<<<<<<<<<< * vec = np.zeros( 3, dtype=np.float64 ) * cdef double[::1] v = vec */ static PyObject 
*__pyx_pw_7quantas_5utils_4math_9fast_math_9vector(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_4math_9fast_math_vector(double __pyx_v_theta, double __pyx_v_phi, CYTHON_UNUSED int __pyx_skip_dispatch) { PyObject *__pyx_v_vec = NULL; __Pyx_memviewslice __pyx_v_v = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; __Pyx_RefNannySetupContext("vector", 0); /* "quantas/utils/math/fast_math.pyx":110 * @cython.wraparound(False) * cpdef vector(double theta, double phi): * vec = np.zeros( 3, dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[::1] v = vec * v[0] = sin(theta)*cos(phi) */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple_, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_vec = __pyx_t_4; __pyx_t_4 = 0; /* "quantas/utils/math/fast_math.pyx":111 * cpdef vector(double theta, double phi): * vec = np.zeros( 3, dtype=np.float64 ) * cdef double[::1] v = vec # <<<<<<<<<<<<<< * v[0] = sin(theta)*cos(phi) * v[1] = sin(theta)*sin(phi) */ __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_v_vec, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 111, __pyx_L1_error) __pyx_v_v = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "quantas/utils/math/fast_math.pyx":112 * vec = np.zeros( 3, dtype=np.float64 ) * cdef double[::1] v = vec * v[0] = sin(theta)*cos(phi) # <<<<<<<<<<<<<< * v[1] = sin(theta)*sin(phi) * v[2] = cos(theta) */ __pyx_t_6 = 0; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_v.data) + __pyx_t_6)) )) = (sin(__pyx_v_theta) * cos(__pyx_v_phi)); /* "quantas/utils/math/fast_math.pyx":113 * cdef double[::1] v = vec * v[0] = sin(theta)*cos(phi) * v[1] = sin(theta)*sin(phi) # <<<<<<<<<<<<<< * v[2] = cos(theta) * return vec */ __pyx_t_7 = 1; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_v.data) + __pyx_t_7)) )) = (sin(__pyx_v_theta) * sin(__pyx_v_phi)); /* "quantas/utils/math/fast_math.pyx":114 * v[0] = sin(theta)*cos(phi) * v[1] = sin(theta)*sin(phi) * v[2] = cos(theta) # <<<<<<<<<<<<<< * return vec * */ __pyx_t_8 = 2; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_v.data) + __pyx_t_8)) )) = cos(__pyx_v_theta); /* "quantas/utils/math/fast_math.pyx":115 * v[1] = sin(theta)*sin(phi) * v[2] = cos(theta) * return vec # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_vec); __pyx_r = __pyx_v_vec; goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":109 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef vector(double theta, double phi): # <<<<<<<<<<<<<< * vec = np.zeros( 3, dtype=np.float64 ) * cdef 
double[::1] v = vec */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_AddTraceback("quantas.utils.math.fast_math.vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_vec); __PYX_XDEC_MEMVIEW(&__pyx_v_v, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_9vector(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_9vector(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_theta; double __pyx_v_phi; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_theta,&__pyx_n_s_phi,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_theta)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_phi)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector", 1, 2, 2, 1); __PYX_ERR(0, 109, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector") < 0)) __PYX_ERR(0, 109, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_theta = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_theta == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 109, __pyx_L3_error) __pyx_v_phi = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_phi == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 109, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 109, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.math.fast_math.vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_8vector(__pyx_self, __pyx_v_theta, __pyx_v_phi); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_8vector(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_theta, double __pyx_v_phi) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("vector", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_7quantas_5utils_4math_9fast_math_vector(__pyx_v_theta, __pyx_v_phi, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.math.fast_math.vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "quantas/utils/math/fast_math.pyx":119 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef cofactor(double[:,::1] mat): # <<<<<<<<<<<<<< * cof = np.zeros( (3,3), dtype=np.float64 ) * cdef double[:,::1] c = cof */ 
static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_11cofactor(PyObject *__pyx_self, PyObject *__pyx_arg_mat); /*proto*/ static PyObject *__pyx_f_7quantas_5utils_4math_9fast_math_cofactor(__Pyx_memviewslice __pyx_v_mat, CYTHON_UNUSED int __pyx_skip_dispatch) { PyObject *__pyx_v_cof = NULL; __Pyx_memviewslice __pyx_v_c = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_m = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; Py_ssize_t __pyx_t_45; Py_ssize_t __pyx_t_46; Py_ssize_t __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; Py_ssize_t __pyx_t_54; Py_ssize_t __pyx_t_55; Py_ssize_t __pyx_t_56; Py_ssize_t __pyx_t_57; Py_ssize_t __pyx_t_58; Py_ssize_t __pyx_t_59; Py_ssize_t __pyx_t_60; Py_ssize_t __pyx_t_61; Py_ssize_t __pyx_t_62; Py_ssize_t __pyx_t_63; Py_ssize_t 
__pyx_t_64; Py_ssize_t __pyx_t_65; Py_ssize_t __pyx_t_66; Py_ssize_t __pyx_t_67; Py_ssize_t __pyx_t_68; Py_ssize_t __pyx_t_69; Py_ssize_t __pyx_t_70; Py_ssize_t __pyx_t_71; Py_ssize_t __pyx_t_72; Py_ssize_t __pyx_t_73; Py_ssize_t __pyx_t_74; Py_ssize_t __pyx_t_75; Py_ssize_t __pyx_t_76; Py_ssize_t __pyx_t_77; Py_ssize_t __pyx_t_78; Py_ssize_t __pyx_t_79; Py_ssize_t __pyx_t_80; Py_ssize_t __pyx_t_81; Py_ssize_t __pyx_t_82; Py_ssize_t __pyx_t_83; Py_ssize_t __pyx_t_84; Py_ssize_t __pyx_t_85; Py_ssize_t __pyx_t_86; Py_ssize_t __pyx_t_87; Py_ssize_t __pyx_t_88; Py_ssize_t __pyx_t_89; Py_ssize_t __pyx_t_90; Py_ssize_t __pyx_t_91; Py_ssize_t __pyx_t_92; Py_ssize_t __pyx_t_93; Py_ssize_t __pyx_t_94; Py_ssize_t __pyx_t_95; __Pyx_RefNannySetupContext("cofactor", 0); /* "quantas/utils/math/fast_math.pyx":120 * @cython.wraparound(False) * cpdef cofactor(double[:,::1] mat): * cof = np.zeros( (3,3), dtype=np.float64 ) # <<<<<<<<<<<<<< * cdef double[:,::1] c = cof * cdef double[:,::1] m = mat */ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__3, __pyx_t_1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cof = __pyx_t_4; __pyx_t_4 = 0; /* "quantas/utils/math/fast_math.pyx":121 * cpdef cofactor(double[:,::1] mat): * cof = np.zeros( (3,3), dtype=np.float64 ) * cdef double[:,::1] c = cof # <<<<<<<<<<<<<< * cdef double[:,::1] m = mat * c[0][0] = m[1][1]*m[2][2] - m[1][2]*m[2][1] */ __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_cof, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 121, __pyx_L1_error) __pyx_v_c = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "quantas/utils/math/fast_math.pyx":122 * cof = np.zeros( (3,3), dtype=np.float64 ) * cdef double[:,::1] c = cof * cdef double[:,::1] m = mat # <<<<<<<<<<<<<< * c[0][0] = m[1][1]*m[2][2] - m[1][2]*m[2][1] * c[0][1] = m[1][2]*m[2][0] - m[1][0]*m[2][2] */ __PYX_INC_MEMVIEW(&__pyx_v_mat, 0); __pyx_v_m = __pyx_v_mat; /* "quantas/utils/math/fast_math.pyx":123 * cdef double[:,::1] c = cof * cdef double[:,::1] m = mat * c[0][0] = m[1][1]*m[2][2] - m[1][2]*m[2][1] # <<<<<<<<<<<<<< * c[0][1] = m[1][2]*m[2][0] - m[1][0]*m[2][2] * c[0][2] = m[1][0]*m[2][1] - m[1][1]*m[2][0] */ __pyx_t_6 = 1; __pyx_t_7 = 1; __pyx_t_8 = 2; __pyx_t_9 = 2; __pyx_t_10 = 1; __pyx_t_11 = 2; __pyx_t_12 = 2; __pyx_t_13 = 1; __pyx_t_14 = 0; __pyx_t_15 = 0; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_14 * __pyx_v_c.strides[0]) )) + __pyx_t_15)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_6 * __pyx_v_m.strides[0]) )) + __pyx_t_7)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_8 * __pyx_v_m.strides[0]) )) + __pyx_t_9)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_10 * __pyx_v_m.strides[0]) )) + __pyx_t_11)) ))) * (*((double *) ( /* dim=1 
*/ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_12 * __pyx_v_m.strides[0]) )) + __pyx_t_13)) ))))); /* "quantas/utils/math/fast_math.pyx":124 * cdef double[:,::1] m = mat * c[0][0] = m[1][1]*m[2][2] - m[1][2]*m[2][1] * c[0][1] = m[1][2]*m[2][0] - m[1][0]*m[2][2] # <<<<<<<<<<<<<< * c[0][2] = m[1][0]*m[2][1] - m[1][1]*m[2][0] * c[1][0] = m[0][2]*m[2][1] - m[0][1]*m[2][2] */ __pyx_t_16 = 1; __pyx_t_17 = 2; __pyx_t_18 = 2; __pyx_t_19 = 0; __pyx_t_20 = 1; __pyx_t_21 = 0; __pyx_t_22 = 2; __pyx_t_23 = 2; __pyx_t_24 = 0; __pyx_t_25 = 1; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_24 * __pyx_v_c.strides[0]) )) + __pyx_t_25)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_16 * __pyx_v_m.strides[0]) )) + __pyx_t_17)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_18 * __pyx_v_m.strides[0]) )) + __pyx_t_19)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_20 * __pyx_v_m.strides[0]) )) + __pyx_t_21)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_22 * __pyx_v_m.strides[0]) )) + __pyx_t_23)) ))))); /* "quantas/utils/math/fast_math.pyx":125 * c[0][0] = m[1][1]*m[2][2] - m[1][2]*m[2][1] * c[0][1] = m[1][2]*m[2][0] - m[1][0]*m[2][2] * c[0][2] = m[1][0]*m[2][1] - m[1][1]*m[2][0] # <<<<<<<<<<<<<< * c[1][0] = m[0][2]*m[2][1] - m[0][1]*m[2][2] * c[1][1] = m[0][0]*m[2][2] - m[0][2]*m[2][0] */ __pyx_t_26 = 1; __pyx_t_27 = 0; __pyx_t_28 = 2; __pyx_t_29 = 1; __pyx_t_30 = 1; __pyx_t_31 = 1; __pyx_t_32 = 2; __pyx_t_33 = 0; __pyx_t_34 = 0; __pyx_t_35 = 2; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_34 * __pyx_v_c.strides[0]) )) + __pyx_t_35)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_26 * __pyx_v_m.strides[0]) )) + __pyx_t_27)) ))) 
* (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_28 * __pyx_v_m.strides[0]) )) + __pyx_t_29)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_30 * __pyx_v_m.strides[0]) )) + __pyx_t_31)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_32 * __pyx_v_m.strides[0]) )) + __pyx_t_33)) ))))); /* "quantas/utils/math/fast_math.pyx":126 * c[0][1] = m[1][2]*m[2][0] - m[1][0]*m[2][2] * c[0][2] = m[1][0]*m[2][1] - m[1][1]*m[2][0] * c[1][0] = m[0][2]*m[2][1] - m[0][1]*m[2][2] # <<<<<<<<<<<<<< * c[1][1] = m[0][0]*m[2][2] - m[0][2]*m[2][0] * c[1][2] = m[0][1]*m[2][0] - m[0][0]*m[2][1] */ __pyx_t_36 = 0; __pyx_t_37 = 2; __pyx_t_38 = 2; __pyx_t_39 = 1; __pyx_t_40 = 0; __pyx_t_41 = 1; __pyx_t_42 = 2; __pyx_t_43 = 2; __pyx_t_44 = 1; __pyx_t_45 = 0; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_44 * __pyx_v_c.strides[0]) )) + __pyx_t_45)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_36 * __pyx_v_m.strides[0]) )) + __pyx_t_37)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_38 * __pyx_v_m.strides[0]) )) + __pyx_t_39)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_40 * __pyx_v_m.strides[0]) )) + __pyx_t_41)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_42 * __pyx_v_m.strides[0]) )) + __pyx_t_43)) ))))); /* "quantas/utils/math/fast_math.pyx":127 * c[0][2] = m[1][0]*m[2][1] - m[1][1]*m[2][0] * c[1][0] = m[0][2]*m[2][1] - m[0][1]*m[2][2] * c[1][1] = m[0][0]*m[2][2] - m[0][2]*m[2][0] # <<<<<<<<<<<<<< * c[1][2] = m[0][1]*m[2][0] - m[0][0]*m[2][1] * c[2][0] = m[0][1]*m[1][2] - m[0][2]*m[1][1] */ __pyx_t_46 = 0; __pyx_t_47 = 0; __pyx_t_48 = 2; __pyx_t_49 = 2; __pyx_t_50 = 0; __pyx_t_51 = 2; __pyx_t_52 = 2; 
__pyx_t_53 = 0; __pyx_t_54 = 1; __pyx_t_55 = 1; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_54 * __pyx_v_c.strides[0]) )) + __pyx_t_55)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_46 * __pyx_v_m.strides[0]) )) + __pyx_t_47)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_48 * __pyx_v_m.strides[0]) )) + __pyx_t_49)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_50 * __pyx_v_m.strides[0]) )) + __pyx_t_51)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_52 * __pyx_v_m.strides[0]) )) + __pyx_t_53)) ))))); /* "quantas/utils/math/fast_math.pyx":128 * c[1][0] = m[0][2]*m[2][1] - m[0][1]*m[2][2] * c[1][1] = m[0][0]*m[2][2] - m[0][2]*m[2][0] * c[1][2] = m[0][1]*m[2][0] - m[0][0]*m[2][1] # <<<<<<<<<<<<<< * c[2][0] = m[0][1]*m[1][2] - m[0][2]*m[1][1] * c[2][1] = m[0][2]*m[1][0] - m[0][0]*m[1][2] */ __pyx_t_56 = 0; __pyx_t_57 = 1; __pyx_t_58 = 2; __pyx_t_59 = 0; __pyx_t_60 = 0; __pyx_t_61 = 0; __pyx_t_62 = 2; __pyx_t_63 = 1; __pyx_t_64 = 1; __pyx_t_65 = 2; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_64 * __pyx_v_c.strides[0]) )) + __pyx_t_65)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_56 * __pyx_v_m.strides[0]) )) + __pyx_t_57)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_58 * __pyx_v_m.strides[0]) )) + __pyx_t_59)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_60 * __pyx_v_m.strides[0]) )) + __pyx_t_61)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_62 * __pyx_v_m.strides[0]) )) + __pyx_t_63)) ))))); /* "quantas/utils/math/fast_math.pyx":129 * c[1][1] = m[0][0]*m[2][2] - 
m[0][2]*m[2][0] * c[1][2] = m[0][1]*m[2][0] - m[0][0]*m[2][1] * c[2][0] = m[0][1]*m[1][2] - m[0][2]*m[1][1] # <<<<<<<<<<<<<< * c[2][1] = m[0][2]*m[1][0] - m[0][0]*m[1][2] * c[2][2] = m[0][0]*m[1][1] - m[0][1]*m[1][0] */ __pyx_t_66 = 0; __pyx_t_67 = 1; __pyx_t_68 = 1; __pyx_t_69 = 2; __pyx_t_70 = 0; __pyx_t_71 = 2; __pyx_t_72 = 1; __pyx_t_73 = 1; __pyx_t_74 = 2; __pyx_t_75 = 0; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_74 * __pyx_v_c.strides[0]) )) + __pyx_t_75)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_66 * __pyx_v_m.strides[0]) )) + __pyx_t_67)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_68 * __pyx_v_m.strides[0]) )) + __pyx_t_69)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_70 * __pyx_v_m.strides[0]) )) + __pyx_t_71)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_72 * __pyx_v_m.strides[0]) )) + __pyx_t_73)) ))))); /* "quantas/utils/math/fast_math.pyx":130 * c[1][2] = m[0][1]*m[2][0] - m[0][0]*m[2][1] * c[2][0] = m[0][1]*m[1][2] - m[0][2]*m[1][1] * c[2][1] = m[0][2]*m[1][0] - m[0][0]*m[1][2] # <<<<<<<<<<<<<< * c[2][2] = m[0][0]*m[1][1] - m[0][1]*m[1][0] * return */ __pyx_t_76 = 0; __pyx_t_77 = 2; __pyx_t_78 = 1; __pyx_t_79 = 0; __pyx_t_80 = 0; __pyx_t_81 = 0; __pyx_t_82 = 1; __pyx_t_83 = 2; __pyx_t_84 = 2; __pyx_t_85 = 1; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_84 * __pyx_v_c.strides[0]) )) + __pyx_t_85)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_76 * __pyx_v_m.strides[0]) )) + __pyx_t_77)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_78 * __pyx_v_m.strides[0]) )) + __pyx_t_79)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 
*/ (__pyx_v_m.data + __pyx_t_80 * __pyx_v_m.strides[0]) )) + __pyx_t_81)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_82 * __pyx_v_m.strides[0]) )) + __pyx_t_83)) ))))); /* "quantas/utils/math/fast_math.pyx":131 * c[2][0] = m[0][1]*m[1][2] - m[0][2]*m[1][1] * c[2][1] = m[0][2]*m[1][0] - m[0][0]*m[1][2] * c[2][2] = m[0][0]*m[1][1] - m[0][1]*m[1][0] # <<<<<<<<<<<<<< * return * */ __pyx_t_86 = 0; __pyx_t_87 = 0; __pyx_t_88 = 1; __pyx_t_89 = 1; __pyx_t_90 = 0; __pyx_t_91 = 1; __pyx_t_92 = 1; __pyx_t_93 = 0; __pyx_t_94 = 2; __pyx_t_95 = 2; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_c.data + __pyx_t_94 * __pyx_v_c.strides[0]) )) + __pyx_t_95)) )) = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_86 * __pyx_v_m.strides[0]) )) + __pyx_t_87)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_88 * __pyx_v_m.strides[0]) )) + __pyx_t_89)) )))) - ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_90 * __pyx_v_m.strides[0]) )) + __pyx_t_91)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_m.data + __pyx_t_92 * __pyx_v_m.strides[0]) )) + __pyx_t_93)) ))))); /* "quantas/utils/math/fast_math.pyx":132 * c[2][1] = m[0][2]*m[1][0] - m[0][0]*m[1][2] * c[2][2] = m[0][0]*m[1][1] - m[0][1]*m[1][0] * return # <<<<<<<<<<<<<< * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "quantas/utils/math/fast_math.pyx":119 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef cofactor(double[:,::1] mat): # <<<<<<<<<<<<<< * cof = np.zeros( (3,3), dtype=np.float64 ) * cdef double[:,::1] c = cof */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); 
__Pyx_AddTraceback("quantas.utils.math.fast_math.cofactor", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cof); __PYX_XDEC_MEMVIEW(&__pyx_v_c, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_m, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_11cofactor(PyObject *__pyx_self, PyObject *__pyx_arg_mat); /*proto*/ static PyObject *__pyx_pw_7quantas_5utils_4math_9fast_math_11cofactor(PyObject *__pyx_self, PyObject *__pyx_arg_mat) { __Pyx_memviewslice __pyx_v_mat = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cofactor (wrapper)", 0); assert(__pyx_arg_mat); { __pyx_v_mat = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_arg_mat, PyBUF_WRITABLE); if (unlikely(!__pyx_v_mat.memview)) __PYX_ERR(0, 119, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("quantas.utils.math.fast_math.cofactor", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_7quantas_5utils_4math_9fast_math_10cofactor(__pyx_self, __pyx_v_mat); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_7quantas_5utils_4math_9fast_math_10cofactor(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_mat) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("cofactor", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_mat.memview)) { __Pyx_RaiseUnboundLocalError("mat"); __PYX_ERR(0, 119, __pyx_L1_error) } __pyx_t_1 = __pyx_f_7quantas_5utils_4math_9fast_math_cofactor(__pyx_v_mat, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("quantas.utils.math.fast_math.cofactor", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_mat, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == 
Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape 
tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 
__pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise 
ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); 
if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ 
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, 
__pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* 
"View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); 
__pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, 
self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject 
*__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* 
"View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct 
__pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } 
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 
* cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data 
= buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * 
allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; 
} if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject 
*__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = 
NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * 
use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, 
__pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
/* NOTE(review): Cython auto-generated module code (View.MemoryView utility
 * section).  Do not hand-edit -- regenerate from the .pyx sources.  Review
 * comments added only; every code token below is unchanged. */
/* Tail of Enum.__setstate_cython__ (function head is earlier in the file):
 * standard Cython epilogue -- returns None on success, records a traceback
 * and returns NULL on error. */
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */
/* Round `memory` UP to the next multiple of `alignment` (identity when the
 * pointer is already aligned).  NOTE(review): `%` assumes alignment != 0;
 * the only visible caller passes sizeof(__pyx_atomic_int), which is fine. */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1;
/* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */
__pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) {
/* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */
}
/* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */
__pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0;
/* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */
/* memoryview.__cinit__ argument-parsing wrapper: unpacks (obj, flags[,
 * dtype_is_object]) from args/kwargs, then forwards to the typed impl.
 * Returns -1 with an exception set on bad arguments. */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0};
/* Keyword path: positional values are gathered first (fall-through switch),
 * then each missing slot is looked up by name in the kwargs dict. */
if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1);
__PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; 
/* NOTE(review): auto-generated body of memoryview.__cinit__ plus the
 * __dealloc__ wrapper; code tokens unchanged, review comments added only. */
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */
/* self.obj = obj -- incref new value before decref of old (safe swap). */
__Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */
/* Short-circuit `or`: exact-type check first, then `obj is not None`. */
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) {
/* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */
/* Exporter left view.obj NULL: normalize to an owned Py_None so that
 * later __Pyx_ReleaseBuffer has a consistent object to drop. */
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) {
/* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */
Py_INCREF(Py_None);
/* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */
}
/* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */
}
/* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */
/* Borrow a lock from the preallocated pool when one is free; the literal
 * 8 is the inlined THREAD_LOCKS_PREALLOCATED constant (see echo above). */
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) {
/* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */
}
/* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */
/* Pool exhausted (or pool slot was NULL): allocate a fresh lock. */
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) {
/* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */
}
/* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */
}
/* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) {
/* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */
/* dtype_is_object iff the buffer format string is exactly "O". */
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */
goto __pyx_L10; }
/* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */
/*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:;
/* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */
/* Align the inline acquisition counter for atomic access. */
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */
/* function exit code */
__pyx_r = 0; goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */
/* memoryview.__dealloc__ trivial wrapper: forwards to the impl below. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext(); }
/* memoryview.__dealloc__ implementation: releases the buffer (when one was
 * acquired) and returns the lock to the preallocated pool or frees it. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */
__pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) {
/* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */
}
/* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock:
 */
/* NOTE(review): continuation of auto-generated __dealloc__ (the comment
 * closed above is opened on the previous source line); code tokens
 * unchanged, review comments added only. */
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) {
/* "View.MemoryView":379 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */
/* Search the in-use portion of the pool for this lock. */
__pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5;
/* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) {
/* "View.MemoryView":381 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */
/* Pool lock: shrink the used count and swap this slot with the last
 * used slot so the free region stays contiguous (lock is NOT freed). */
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) {
/* "View.MemoryView":384 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":383 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */
}
/* "View.MemoryView":385 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */
goto __pyx_L6_break;
/* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */
} }
/* Python for/else: only reached when the loop finished without `break`,
 * i.e. the lock was heap-allocated -- free it. */
/*else*/ {
/* "View.MemoryView":387 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */
PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:;
/* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */
}
/* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */
/* function exit code */
__Pyx_RefNannyFinishContext(); }
/* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */
/* get_item_pointer: walks the index sequence, applying pybuffer_index per
 * dimension to compute the element's address; returns NULL on error. */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":391 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */
/* Fast path when index is a list/tuple (direct item macros); otherwise
 * fall back to the generic iterator protocol via tp_iternext. */
__pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 393, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5);
#endif
} else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5);
#endif
} } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 393, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":394 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 394, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */
} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":396 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */
__pyx_r = __pyx_v_itemp; goto __pyx_L0;
/* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */
/* memoryview.__getitem__ trivial wrapper: forwards to the impl below. */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* memoryview.__getitem__ implementation: Ellipsis -> self; slice indices ->
 * new sliced memoryview; scalar indices -> converted element object. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6;
/* NOTE(review): auto-generated body of memoryview.__getitem__; code tokens
 * unchanged, review comments added only. */
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) {
/* "View.MemoryView":401 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */
/* `mv[...]` returns the memoryview itself (new reference). */
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0;
/* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */
}
/* "View.MemoryView":403 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */
/* _unellipsify returns a 2-tuple; the unpacking below validates size==2. */
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 403, __pyx_L1_error) }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 403, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0;
/* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 406, __pyx_L1_error) if (__pyx_t_2) {
/* "View.MemoryView":407 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */
__Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */
}
/* "View.MemoryView":409 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */
/* Scalar lookup: compute the element address, then box it via the
 * vtable's convert_item_to_object. */
/*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 409, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":410 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */
__Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; }
/* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5);
/* NOTE(review): auto-generated __getitem__ epilogue, __setitem__ and the
 * start of is_slice (is_slice continues past this chunk); code tokens
 * unchanged, review comments added only. */
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */
/* memoryview.__setitem__ trivial wrapper: forwards to the impl below. */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* memoryview.__setitem__ implementation: rejects read-only views, then
 * dispatches to slice-assignment (memoryview source), scalar broadcast
 * assignment, or single-element assignment. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */
__pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 414, __pyx_L1_error)
/* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */
}
/* "View.MemoryView":416 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */
/* Same 2-tuple unpack pattern as __getitem__; note `index` is rebound. */
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 416, __pyx_L1_error) }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 416, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 418, __pyx_L1_error) if (__pyx_t_1) {
/* "View.MemoryView":419 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */
/* is_slice returns a memoryview for buffer-like values, falsy otherwise
 * (see is_slice below); chooses slice-copy vs scalar-broadcast. */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0;
/* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 420, __pyx_L1_error) if (__pyx_t_1) {
/* "View.MemoryView":421 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */
goto __pyx_L5; }
/* "View.MemoryView":423 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */
/*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 423, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:;
/* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */
goto __pyx_L4; }
/* "View.MemoryView":425 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */
/*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:;
/* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */
/* function exit code */
__pyx_r = 0; goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */
/* is_slice: coerce `obj` to a memoryview when it exports a buffer
 * (read-only, any-contiguous acquisition); body continues past this
 * chunk boundary. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) {
/* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */
{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ {
/* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":431 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7))
__PYX_ERR(1, 431, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":432 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 432, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* 
"View.MemoryView":433 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":435 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef 
__Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":442 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 442, __pyx_L1_error) /* "View.MemoryView":443 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = 
__Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 441, __pyx_L1_error) /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; 
PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":447 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":452 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":455 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":457 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 457, __pyx_L1_error) /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":458 * if tmp == NULL: * raise MemoryError * 
item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":460 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":462 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":466 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if 
(__pyx_t_1) { /* "View.MemoryView":471 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 471, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":472 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":475 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = 
__pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":478 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char 
*)NULL))) __PYX_ERR(1, 478, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":479 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":484 * 
"""Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":487 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":489 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* 
"View.MemoryView":494 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":495 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":490 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); 
__Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 491, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the 
user, or if Cython doesn't * know how to convert the type""" */
/* NOTE(review): Cython-generated C (View.MemoryView utility code). Do not
 * hand-edit — regenerate from the .pyx source instead. */

/* memoryview.assign_item_from_object (View.MemoryView:497-511):
 * packs a Python value into the raw item buffer `itemp` using the Python
 * `struct` module and the view's format string.  Fallback path used only
 * when Cython cannot convert the type natively. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_v_struct = NULL;
  char __pyx_v_c;
  PyObject *__pyx_v_bytesvalue = 0;
  Py_ssize_t __pyx_v_i;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  Py_ssize_t __pyx_t_9;
  PyObject *__pyx_t_10 = NULL;
  char *__pyx_t_11;
  char *__pyx_t_12;
  char *__pyx_t_13;
  char *__pyx_t_14;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
  /* View.MemoryView:500 — `import struct` */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 500, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_struct = __pyx_t_1;
  __pyx_t_1 = 0;
  /* View.MemoryView:505 — `if isinstance(value, tuple):` */
  __pyx_t_2 = PyTuple_Check(__pyx_v_value);
  __pyx_t_3 = (__pyx_t_2 != 0);
  if (__pyx_t_3) {
    /* View.MemoryView:506 — `bytesvalue = struct.pack(self.view.format, *value)`
     * implemented as struct.pack(*((format,) + tuple(value))) */
    __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    /* result must be bytes (or None) */
    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 506, __pyx_L1_error)
    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;
    goto __pyx_L3;
  }
  /* View.MemoryView:508 — else: `bytesvalue = struct.pack(self.view.format, value)` */
  /*else*/ {
    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 508, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* unpack bound method so the fast-call paths can pass `self` explicitly */
    __pyx_t_5 = NULL;
    __pyx_t_7 = 0;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_6, function);
        __pyx_t_7 = 1;
      }
    }
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(__pyx_t_6)) {
      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
      __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    } else
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
      PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
      __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    } else
    #endif
    {
      /* generic-call fallback: build an argument tuple */
      __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 508, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_8);
      if (__pyx_t_5) {
        __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
      }
      __Pyx_GIVEREF(__pyx_t_1);
      PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
      __Pyx_INCREF(__pyx_v_value);
      __Pyx_GIVEREF(__pyx_v_value);
      PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
      __pyx_t_1 = 0;
      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    }
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 508, __pyx_L1_error)
    __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;
  }
  __pyx_L3:;
  /* View.MemoryView:510-511 — `for i, c in enumerate(bytesvalue): itemp[i] = c` */
  __pyx_t_9 = 0;
  if (unlikely(__pyx_v_bytesvalue == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
    __PYX_ERR(1, 510, __pyx_L1_error)
  }
  __Pyx_INCREF(__pyx_v_bytesvalue);
  __pyx_t_10 = __pyx_v_bytesvalue;
  __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
  __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
  for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
    __pyx_t_11 = __pyx_t_14;
    __pyx_v_c = (__pyx_t_11[0]);
    __pyx_v_i = __pyx_t_9;
    __pyx_t_9 = (__pyx_t_9 + 1);
    (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
  }
  __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_struct);
  __Pyx_XDECREF(__pyx_v_bytesvalue);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.__getbuffer__ (View.MemoryView:514) — Python wrapper;
 * declaration continues on the next source chunk line. */
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self,
Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.__getbuffer__ implementation (View.MemoryView:514-543):
 * fills a Py_buffer struct from the wrapped view, honoring the PyBUF_*
 * request flags.  Generated by Cython — do not hand-edit. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t *__pyx_t_4;
  char *__pyx_t_5;
  void *__pyx_t_6;
  int __pyx_t_7;
  Py_ssize_t __pyx_t_8;
  if (__pyx_v_info == NULL) {
    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
    return -1;
  }
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* provisional owner; replaced with `self` on success, cleared on error */
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(__pyx_v_info->obj);
  /* View.MemoryView:515 — `if flags & PyBUF_WRITABLE and self.view.readonly:` */
  __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->view.readonly != 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (unlikely(__pyx_t_1)) {
    /* View.MemoryView:516 — raise ValueError("Cannot create writable memory
     * view from read-only memoryview") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 516, __pyx_L1_error)
  }
  /* View.MemoryView:518-521 — info.shape only when PyBUF_ND requested */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.shape;
    __pyx_v_info->shape = __pyx_t_4;
    goto __pyx_L6;
  }
  /*else*/ {
    __pyx_v_info->shape = NULL;
  }
  __pyx_L6:;
  /* View.MemoryView:523-526 — info.strides only when PyBUF_STRIDES requested */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.strides;
    __pyx_v_info->strides = __pyx_t_4;
    goto __pyx_L7;
  }
  /*else*/ {
    __pyx_v_info->strides = NULL;
  }
  __pyx_L7:;
  /* View.MemoryView:528-531 — info.suboffsets only when PyBUF_INDIRECT requested */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.suboffsets;
    __pyx_v_info->suboffsets = __pyx_t_4;
    goto __pyx_L8;
  }
  /*else*/ {
    __pyx_v_info->suboffsets = NULL;
  }
  __pyx_L8:;
  /* View.MemoryView:533-536 — info.format only when PyBUF_FORMAT requested */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {
    __pyx_t_5 = __pyx_v_self->view.format;
    __pyx_v_info->format = __pyx_t_5;
    goto __pyx_L9;
  }
  /*else*/ {
    __pyx_v_info->format = NULL;
  }
  __pyx_L9:;
  /* View.MemoryView:538-542 — unconditional scalar fields */
  __pyx_t_6 = __pyx_v_self->view.buf;
  __pyx_v_info->buf = __pyx_t_6;
  __pyx_t_7 = __pyx_v_self->view.ndim;
  __pyx_v_info->ndim = __pyx_t_7;
  __pyx_t_8 = __pyx_v_self->view.itemsize;
  __pyx_v_info->itemsize = __pyx_t_8;
  __pyx_t_8 = __pyx_v_self->view.len;
  __pyx_v_info->len = __pyx_t_8;
  __pyx_t_1 = __pyx_v_self->view.readonly;
  __pyx_v_info->readonly = __pyx_t_1;
  /* View.MemoryView:543 — `info.obj = self` (replaces provisional Py_None) */
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  if (__pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj);
    __pyx_v_info->obj
= 0; }
  goto __pyx_L2;
  __pyx_L0:;
  /* success path: drop the provisional Py_None owner if it was never replaced */
  if (__pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj);
    __pyx_v_info->obj = 0;
  }
  __pyx_L2:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.T property (View.MemoryView:549-552):
 * returns a transposed copy — `result = memoryview_copy(self);
 * transpose_memslice(&result.from_slice); return result`.
 * Cython-generated; do not hand-edit. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:550 — `cdef _memoryviewslice result = memoryview_copy(self)` */
  __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 550, __pyx_L1_error)
  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
  __pyx_t_1 = 0;
  /* View.MemoryView:551 — `transpose_memslice(&result.from_slice)` (0 => error) */
  __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 551, __pyx_L1_error)
  /* View.MemoryView:552 — `return result` */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.base property (View.MemoryView:555-556): `return self.obj`. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:556 — `return self.obj` */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->obj);
  __pyx_r = __pyx_v_self->obj;
  goto __pyx_L0;
  /* "View.MemoryView":555 * * @property * def
base(self): # <<<<<<<<<<<<<< * return self.obj * */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.shape property (View.MemoryView:559-560):
 * `return tuple([length for length in self.view.shape[:self.view.ndim]])`.
 * Cython-generated; do not hand-edit. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_length;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:560 — build a list of the first ndim shape entries,
   * then convert it to a tuple */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
  for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
    __pyx_t_2 = __pyx_t_4;
    __pyx_v_length = (__pyx_t_2[0]);
    __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 560, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  }
  __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.strides property (View.MemoryView:563-568): raises ValueError
 * when the view exposes no strides, otherwise returns them as a tuple. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_stride;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:564 — `if self.view.strides == NULL:` */
  __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
  if (unlikely(__pyx_t_1)) {
    /* "View.MemoryView":566 * if
self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */
    /* View.MemoryView:566 — raise ValueError("Buffer view does not expose strides") */
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 566, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 566, __pyx_L1_error)
  }
  /* View.MemoryView:568 — `return tuple([stride for stride in
   * self.view.strides[:self.view.ndim]])` */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
  for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
    __pyx_t_3 = __pyx_t_5;
    __pyx_v_stride = (__pyx_t_3[0]);
    __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 568, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  }
  __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.suboffsets property (View.MemoryView:571-575): returns
 * (-1,) * ndim when no suboffsets exist, else the suboffsets as a tuple.
 * Cython-generated; do not hand-edit. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  Py_ssize_t *__pyx_t_6;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:572-573 — `if self.view.suboffsets == NULL:
   * return (-1,) * self.view.ndim` (__pyx_tuple__15 is the constant (-1,)) */
  __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
  if (__pyx_t_1) {
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r =
__pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* View.MemoryView:575 — `return tuple([suboffset for suboffset in
   * self.view.suboffsets[:self.view.ndim]])` */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 575, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
  for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
    __pyx_t_4 = __pyx_t_6;
    __pyx_v_suboffset = (__pyx_t_4[0]);
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 575, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  }
  __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.ndim property (View.MemoryView:578-579): `return self.view.ndim`.
 * Cython-generated; do not hand-edit. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:579 — `return self.view.ndim` */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.itemsize property (View.MemoryView:582-583):
 * `return self.view.itemsize`.  Getter body continues on the next line. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject
*__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:583 — `return self.view.itemsize` */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.nbytes property (View.MemoryView:586-587):
 * `return self.size * self.view.itemsize` (self.size looked up as a
 * Python attribute).  Cython-generated; do not hand-edit. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:587 — `return self.size * self.view.itemsize` */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.size property (View.MemoryView:590-599): lazily computes the
 * product of all shape extents, caches it in self._size, and returns it.
 * Getter declarations continue on the next line. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int
__pyx_t_1;
  int __pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* View.MemoryView:591 — `if self._size is None:` (compute once, then cache) */
  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* View.MemoryView:592 — `result = 1` */
    __Pyx_INCREF(__pyx_int_1);
    __pyx_v_result = __pyx_int_1;
    /* View.MemoryView:594-595 — `for length in self.view.shape[:self.view.ndim]:
     * result *= length` */
    __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
    for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
      __pyx_t_3 = __pyx_t_5;
      __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
      __pyx_t_6 = 0;
      __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 595, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
      __pyx_t_6 = 0;
    }
    /* View.MemoryView:597 — `self._size = result` */
    __Pyx_INCREF(__pyx_v_result);
    __Pyx_GIVEREF(__pyx_v_result);
    __Pyx_GOTREF(__pyx_v_self->_size);
    __Pyx_DECREF(__pyx_v_self->_size);
    __pyx_v_self->_size = __pyx_v_result;
  }
  /* View.MemoryView:599 — `return self._size` */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_size);
  __pyx_r = __pyx_v_self->_size;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* memoryview.__len__ (View.MemoryView:601-605): first shape extent when
 * ndim >= 1, otherwise 0.  The function's tail lies beyond this chunk.
 * Cython-generated; do not hand-edit. */
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__len__", 0);
  /* View.MemoryView:602-603 — `if self.view.ndim >= 1: return self.view.shape[0]` */
  __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
  if (__pyx_t_1) {
    __pyx_r = (__pyx_v_self->view.shape[0]);
    goto __pyx_L0;
  }
  /* "View.MemoryView":605 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self):
*/ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":609 * def __repr__(self): * return 
"<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":612 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; 
} /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":618 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":619 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":624 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":625 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 625, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":629 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":631 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice 
= slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":632 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 632, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":637 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 637, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":641 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":643 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":644 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 644, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":649 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; 
/* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
__PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":654 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":655 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":656 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return 
isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":660 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":667 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":668 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":667 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":670 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":672 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":673 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":674 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 675, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 675, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == 
__pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 678, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":679 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":681 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__18); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 681, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":682 * else: 
* result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":685 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 685, __pyx_L1_error) /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":687 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # 
<<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":688 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 688, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":690 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 690, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":692 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":694 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */
/* NOTE(review): Cython-generated code (View.MemoryView helper) -- do not hand-edit; regenerate
 * from the .pyx source instead. assert_direct_dimensions raises ValueError if any of the first
 * `ndim` entries of `suboffsets` is >= 0, i.e. if any dimension of the buffer is indirect. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":697 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if
(unlikely(!__pyx_t_5)) __PYX_ERR(1, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 699, __pyx_L1_error) /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */
/* NOTE(review): Cython-generated memview_slice -- do not hand-edit. Walks `indices`
 * (a mix of integers, None, and slice objects): integer indices and slices are applied
 * through __pyx_memoryview_slice_memviewslice, while None inserts a new length-1 axis
 * (shape=1, stride=0, suboffset=-1). The resulting __Pyx_memviewslice `dst` is wrapped
 * into a new memoryview object via __pyx_memoryview_fromslice; when the input is a
 * _memoryviewslice, its to_object_func/to_dtype_func converters are carried over. */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int
__pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":707 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":714 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":718 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 718, __pyx_L1_error) } } #endif /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":721 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 721, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":722 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = 
&memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":724 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":725 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":731 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":732 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":737 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":738 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 742, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 742, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 742, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":747 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ 
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) /* "View.MemoryView":744 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 744, __pyx_L1_error) /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":751 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":752 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":753 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":754 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * 
start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":756 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":757 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":758 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not 
None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 758, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 758, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":760 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":761 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":762 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":764 
* have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 764, __pyx_L1_error) /* "View.MemoryView":770 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":774 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) } /* "View.MemoryView":775 * return 
memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 775, __pyx_L1_error) } /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 773, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":779 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) 
__PYX_ERR(1, 778, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":826 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start 
= (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 828, __pyx_L1_error) /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":831 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":834 * * if 
have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 834, __pyx_L1_error) /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* 
"View.MemoryView":844 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":846 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":851 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ 
__pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":862 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":864 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* 
"View.MemoryView":871 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":874 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":877 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":880 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":881 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":882 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = 
(((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":888 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":895 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":896 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 895, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":898 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":900 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t 
shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":908 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":909 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":913 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 913, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 913, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":914 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: 
* shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":912 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":916 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":917 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":918 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":922 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* 
"View.MemoryView":924 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 924, __pyx_L1_error) /* "View.MemoryView":923 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":927 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 927, __pyx_L1_error) /* "View.MemoryView":926 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":929 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":931 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":930 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":933 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
__pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":940 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":942 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":943 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":947 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":948 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":949 * for i in range(ndim / 
2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":950 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":953 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 953, __pyx_L1_error) /* "View.MemoryView":952 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":955 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto 
__pyx_L0; /* "View.MemoryView":939 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":973 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":972 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if 
self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":977 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":976 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":979 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":975 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":983 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 983, __pyx_L1_error) /* "View.MemoryView":982 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":985 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":981 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":989 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":988 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ 
/* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default 
__reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1004 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1009 * * * result = _memoryviewslice(None, 0, dtype_is_object) # 
<<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1011 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1012 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1014 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1015 * * result.from_object = (<memoryview> memviewslice.memview).base * 
result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1017 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1018 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1019 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1020 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1021 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # 
<<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1026 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1028 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1029 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1032 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1033 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1035 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1036 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1038 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1039 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1040 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1040, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1040, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1042 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1043 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1045 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1052 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1052, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1053 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1055 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1056 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1048 * * 
@cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1063 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1064 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1065 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1067 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * 
dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1068 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1070 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1071 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1072 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1073 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef 
memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1079 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1080 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1080, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1091 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1092 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1094 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = 
NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1095 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1097 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1099 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1107 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1109 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1117 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1118 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1120 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1122 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1123 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1125 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1126 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * 
break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1127 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1128 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1126 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1133 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1143 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1144 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1145 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1146 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # 
<<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1150 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1151 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1153 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for 
i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1154 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1155 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1156 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1158 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1159 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1163 * src_shape + 1, 
dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1164 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1169 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice 
*__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1176 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1178 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1179 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1181 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * 
strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1194 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1195 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1196 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1198 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1199 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1200 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1202 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1216 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1217 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1219 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1221 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error) /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1224 
* * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1225 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1226 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1227 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1228 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1230 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1234 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = 
(((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1236 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1241 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1243 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef 
WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1251 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1250 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, 
extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1250, __pyx_L1_error) /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1255 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * 
@cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1255, __pyx_L1_error) /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* 
"View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1260 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1260, __pyx_L1_error) /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1262 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1262, __pyx_L1_error) } /* "View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice 
__pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1273 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1274 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1276 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1277 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1278 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1281 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1282 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1281 * cdef 
__Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1286 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1288 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * 
src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1292 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1294 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", 
i) * */ } } /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1304 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1305 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1310 * * * 
if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1311 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1310 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1317 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * 
refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1318 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1319 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1326 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) 
__PYX_ERR(1, 1326, __pyx_L1_error) /* "View.MemoryView":1327 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1327, __pyx_L1_error) /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1329 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1330 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1331 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1333 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1334 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save 
= __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1341 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1343 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1344 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1345 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1346 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = 
mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1348 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1349 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1350 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1351 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1364 * * if dtype_is_object: * 
refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1371 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1374 * * 
@cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1378 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1381 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1383 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto 
__pyx_L5; } /* "View.MemoryView":1385 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1386 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1388 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1374 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1397 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1398 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, 
__pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1400 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1408 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1409 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1412 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = 
__pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1413 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1414 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1416 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1417 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1419 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ 
/* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % 
__pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* 
function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef 
__pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, 
__pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, 
__pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject 
*a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static 
PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.math.fast_math.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = 
Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.math.fast_math.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & 
Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } 
/* NOTE(review): this file is Cython-generated C for the module
 * "quantas.utils.math.fast_math". Do not hand-edit; change the .pyx source and
 * regenerate. The comments below only annotate the generated type slots. */

/* sq_item slot for memoryview: implements obj[i] for an integer index by boxing
 * the index and delegating to the mapping-protocol mp_subscript slot.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; }
/* mp_ass_subscript slot: obj[i] = v delegates to the generated __setitem__;
 * item deletion (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } }
/* Read-only property getters (wired into the getset table below): each simply
 * forwards to the Cython-generated __get__ implementation for that property. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); }
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); }
/* (declaration continues on the next source line) */ static PyMethodDef
__pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, 
/*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.math.fast_math.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if 
(unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "quantas.utils.math.fast_math._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif }; static PyMethodDef __pyx_methods[] = { {"vector", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_4math_9fast_math_9vector, 
METH_VARARGS|METH_KEYWORDS, 0}, {"cofactor", (PyCFunction)__pyx_pw_7quantas_5utils_4math_9fast_math_11cofactor, METH_O, 0}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_fast_math(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_fast_math}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "fast_math", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 
0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_R2, __pyx_k_R2, sizeof(__pyx_k_R2), 0, 0, 1, 1}, {&__pyx_n_s_R2_view, __pyx_k_R2_view, sizeof(__pyx_k_R2_view), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 
1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1}, {&__pyx_n_s_Y, __pyx_k_Y, sizeof(__pyx_k_Y), 0, 0, 1, 1}, {&__pyx_n_s_Y_view, __pyx_k_Y_view, sizeof(__pyx_k_Y_view), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_coeffs, __pyx_k_coeffs, sizeof(__pyx_k_coeffs), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, 
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_multi_R2, __pyx_k_multi_R2, sizeof(__pyx_k_multi_R2), 0, 0, 1, 1}, {&__pyx_n_s_multi_interpolate, __pyx_k_multi_interpolate, sizeof(__pyx_k_multi_interpolate), 0, 0, 1, 1}, {&__pyx_n_s_multi_interpolate_array, __pyx_k_multi_interpolate_array, sizeof(__pyx_k_multi_interpolate_array), 0, 0, 1, 1}, {&__pyx_n_s_multi_interpolate_scalar, __pyx_k_multi_interpolate_scalar, sizeof(__pyx_k_multi_interpolate_scalar), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_nc, __pyx_k_nc, sizeof(__pyx_k_nc), 0, 0, 1, 1}, {&__pyx_n_s_ndarray, __pyx_k_ndarray, sizeof(__pyx_k_ndarray), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 
0, 0, 1, 1}, {&__pyx_n_s_nv, __pyx_k_nv, sizeof(__pyx_k_nv), 0, 0, 1, 1}, {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_phi, __pyx_k_phi, sizeof(__pyx_k_phi), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_quantas_utils_math_fast_math, __pyx_k_quantas_utils_math_fast_math, sizeof(__pyx_k_quantas_utils_math_fast_math), 0, 0, 1, 1}, {&__pyx_kp_s_quantas_utils_math_fast_math_pyx, __pyx_k_quantas_utils_math_fast_math_pyx, sizeof(__pyx_k_quantas_utils_math_fast_math_pyx), 0, 0, 1, 0}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, {&__pyx_n_s_result_view, __pyx_k_result_view, sizeof(__pyx_k_result_view), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, 
sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_ssreg, __pyx_k_ssreg, sizeof(__pyx_k_ssreg), 0, 0, 1, 1}, {&__pyx_n_s_ssreg_view, __pyx_k_ssreg_view, sizeof(__pyx_k_ssreg_view), 0, 0, 1, 1}, {&__pyx_n_s_sstot, __pyx_k_sstot, sizeof(__pyx_k_sstot), 0, 0, 1, 1}, {&__pyx_n_s_sstot_view, __pyx_k_sstot_view, sizeof(__pyx_k_sstot_view), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_theta, __pyx_k_theta, sizeof(__pyx_k_theta), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_ybar, __pyx_k_ybar, 
/*
 * Auto-generated Cython module-init support code (do not edit by hand;
 * regenerate from quantas/utils/math/fast_math.pyx).
 *
 * This span covers: the tail of the interned-string table, cached-builtin
 * lookup, cached-constant construction, and global int-constant creation.
 */

/* Tail of __pyx_string_tab. Each entry is
   {&slot, utf8 literal, sizeof(literal), encoding flag, intern flag,
    is_str, is_identifier}; the all-zero entry terminates the table. */
sizeof(__pyx_k_ybar), 0, 0, 1, 1},
  {&__pyx_n_s_ybar_view, __pyx_k_ybar_view, sizeof(__pyx_k_ybar_view), 0, 0, 1, 1},
  {&__pyx_n_s_yhat, __pyx_k_yhat, sizeof(__pyx_k_yhat), 0, 0, 1, 1},
  {&__pyx_n_s_yhat_view, __pyx_k_yhat_view, sizeof(__pyx_k_yhat_view), 0, 0, 1, 1},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};

/* Resolve and cache the Python builtins referenced by the generated code.
   The (file, line) pairs in __PYX_ERR point back into the .pyx / utility
   sources. Returns 0 on success, -1 with a Python exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 37, __pyx_L1_error)
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 400, __pyx_L1_error)
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 609, __pyx_L1_error)
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 828, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}

/* Build the module's constant tuples, slice, and code objects once at import
   time. Source locations condensed from the generated excerpt comments:
   file 0 = quantas/utils/math/fast_math.pyx, file 1 = View.MemoryView /
   "(tree fragment)" / "stringsource" utility code.
   Returns 0 on success, -1 with a Python exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  /* fast_math.pyx:110 vector(): shape arg (3,) for np.zeros */
  __pyx_tuple_ = PyTuple_Pack(1, __pyx_int_3); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 110, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
  /* fast_math.pyx:120 cofactor(): shape arg ((3,3),) for np.zeros */
  __pyx_tuple__2 = PyTuple_Pack(2, __pyx_int_3, __pyx_int_3); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 120, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_tuple__2); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 120, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
  /* View.MemoryView:133 ValueError("Empty shape tuple for cython.array") */
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  /* View.MemoryView:136 ValueError("itemsize <= 0 for cython.array") */
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  /* View.MemoryView:148 MemoryError("unable to allocate shape and strides.") */
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
  /* View.MemoryView:176 MemoryError("unable to allocate array data.") */
  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);
  /* View.MemoryView:192 ValueError("Can only create a buffer that is contiguous in memory.") */
  __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__8);
  __Pyx_GIVEREF(__pyx_tuple__8);
  /* (tree fragment):2 TypeError("no default __reduce__ due to non-trivial __cinit__") */
  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_GIVEREF(__pyx_tuple__9);
  /* (tree fragment):4 same message for __setstate_cython__ */
  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__10);
  __Pyx_GIVEREF(__pyx_tuple__10);
  /* View.MemoryView:414 TypeError("Cannot assign to read-only memoryview") */
  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__11);
  __Pyx_GIVEREF(__pyx_tuple__11);
  /* View.MemoryView:491 ValueError("Unable to convert item to object") */
  __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__12);
  __Pyx_GIVEREF(__pyx_tuple__12);
  /* View.MemoryView:516 ValueError("Cannot create writable memory view from read-only memoryview") */
  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
  /* View.MemoryView:566 ValueError("Buffer view does not expose strides") */
  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__14);
  __Pyx_GIVEREF(__pyx_tuple__14);
  /* View.MemoryView:573 suboffsets default: the 1-tuple (-1,), built item-by-item */
  __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__15);
  __Pyx_INCREF(__pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_int_neg_1);
  PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_tuple__15);
  /* (tree fragment):2 and :4 — memoryview __reduce__/__setstate__ TypeError args */
  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__16);
  __Pyx_GIVEREF(__pyx_tuple__16);
  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__17);
  __Pyx_GIVEREF(__pyx_tuple__17);
  /* View.MemoryView:678 slice(None) used when expanding an Ellipsis index */
  __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(1, 678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_slice__18);
  __Pyx_GIVEREF(__pyx_slice__18);
  /* View.MemoryView:699 ValueError("Indirect dimensions not supported") */
  __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__19);
  __Pyx_GIVEREF(__pyx_tuple__19);
  /* (tree fragment):2 and :4 — _memoryviewslice __reduce__/__setstate__ TypeError args */
  __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__20);
  __Pyx_GIVEREF(__pyx_tuple__20);
  __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__21);
  __Pyx_GIVEREF(__pyx_tuple__21);
  /* fast_math.pyx:27 multi_interpolate_array(X, coeffs): varnames tuple + code object */
  __pyx_tuple__22 = PyTuple_Pack(10, __pyx_n_s_X, __pyx_n_s_coeffs, __pyx_n_s_nv, __pyx_n_s_nc, __pyx_n_s_nx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_result, __pyx_n_s_result_view); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 27, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__22);
  __Pyx_GIVEREF(__pyx_tuple__22);
  __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(2, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_quantas_utils_math_fast_math_pyx, __pyx_n_s_multi_interpolate_array, 27, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 27, __pyx_L1_error)
  /* fast_math.pyx:46 multi_interpolate_scalar(x, coeffs): varnames tuple + code object */
  __pyx_tuple__24 = PyTuple_Pack(8, __pyx_n_s_x, __pyx_n_s_coeffs, __pyx_n_s_nv, __pyx_n_s_nc, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_result, __pyx_n_s_result_view); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__24);
  __Pyx_GIVEREF(__pyx_tuple__24);
  __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_quantas_utils_math_fast_math_pyx, __pyx_n_s_multi_interpolate_scalar, 46, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 46, __pyx_L1_error)
  /* fast_math.pyx:61 multi_interpolate(X, coeffs): varnames tuple + code object */
  __pyx_tuple__26 = PyTuple_Pack(2, __pyx_n_s_X, __pyx_n_s_coeffs); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 61, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__26);
  __Pyx_GIVEREF(__pyx_tuple__26);
  __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_quantas_utils_math_fast_math_pyx, __pyx_n_s_multi_interpolate, 61, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 61, __pyx_L1_error)
  /* fast_math.pyx:70 multi_R2(X, Y, coeffs): varnames tuple + code object */
  __pyx_tuple__28 = PyTuple_Pack(19, __pyx_n_s_X, __pyx_n_s_Y, __pyx_n_s_coeffs, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nc, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_yhat, __pyx_n_s_ybar, __pyx_n_s_ssreg, __pyx_n_s_sstot, __pyx_n_s_R2, __pyx_n_s_ybar_view, __pyx_n_s_ssreg_view, __pyx_n_s_sstot_view, __pyx_n_s_R2_view, __pyx_n_s_Y_view, __pyx_n_s_yhat_view); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 70, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__28);
  __Pyx_GIVEREF(__pyx_tuple__28);
  __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(3, 0, 19, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_quantas_utils_math_fast_math_pyx, __pyx_n_s_multi_R2, 70, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 70, __pyx_L1_error)
  /* View.MemoryView:286-292 — Enum() constructor args for the memory-layout sentinels */
  __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__30);
  __Pyx_GIVEREF(__pyx_tuple__30);
  __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__31);
  __Pyx_GIVEREF(__pyx_tuple__31);
  __pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__32);
  __Pyx_GIVEREF(__pyx_tuple__32);
  __pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(1, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__33);
  __Pyx_GIVEREF(__pyx_tuple__33);
  __pyx_tuple__34 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(1, 292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__34);
  __Pyx_GIVEREF(__pyx_tuple__34);
  /* (tree fragment):1 __pyx_unpickle_Enum: varnames tuple + code object */
  __pyx_tuple__35 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__35);
  __Pyx_GIVEREF(__pyx_tuple__35);
  __pyx_codeobj__36 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__36)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* Initialize interned strings and the small-integer constants the module
   uses. Returns 0 on success, -1 with a Python exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
  /* 184977713 is the pickle checksum constant used by __pyx_unpickle_Enum */
  __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}

/* Forward declarations for the module-init phases invoked from the module
   exec function. */
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void);
/*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/

/* Pre-set the memoryview layout sentinels (generic/strided/indirect/...)
   to Py_None; they are replaced with Enum instances during module exec. */
static int __Pyx_modinit_global_init_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
  /*--- Global init code ---*/
  generic = Py_None; Py_INCREF(Py_None);
  strided = Py_None; Py_INCREF(Py_None);
  indirect = Py_None; Py_INCREF(Py_None);
  contiguous = Py_None; Py_INCREF(Py_None);
  indirect_contiguous = Py_None; Py_INCREF(Py_None);
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C variables are exported by this module; kept as an empty phase. */
static int __Pyx_modinit_variable_export_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
  /*--- Variable export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C functions are exported by this module; kept as an empty phase. */
static int __Pyx_modinit_function_export_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
  /*--- Function export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Ready the extension types used by the memoryview machinery (array,
   Enum, memoryview, _memoryviewslice), wiring each type's C-level vtable
   before PyType_Ready. Returns 0 on success, -1 with an exception set. */
static int __Pyx_modinit_type_init_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
  /*--- Type init code ---*/
  __pyx_vtabptr_array = &__pyx_vtable_array;
  __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
  if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_array.tp_print = 0;
  #endif
  if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  __pyx_array_type = &__pyx_type___pyx_array;
  if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_MemviewEnum.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
  __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
  __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
  __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
  __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
  __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
  __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
  __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
  __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
  __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
  if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_memoryview.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
  __pyx_memoryview_type = &__pyx_type___pyx_memoryview;
  /* _memoryviewslice inherits memoryview's vtable, overriding two slots */
  __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
  __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
  __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
  __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
  __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
  if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_memoryviewslice.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
  __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* No external types are imported by this module; kept as an empty phase. */
static int __Pyx_modinit_type_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
  /*--- Type import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C variables are imported; kept as an empty phase. */
static int __Pyx_modinit_variable_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C functions are imported; kept as an empty phase. */
static int __Pyx_modinit_function_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Module entry-point linkage: Python 2 uses void initfast_math(),
   Python 3 uses PyObject *PyInit_fast_math(). */
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initfast_math(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initfast_math(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_fast_math(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_fast_math(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
/* PEP 489 multi-phase init: PyInit_* only returns the module definition;
   creation and execution happen via __pyx_pymod_create / __pyx_pymod_exec. */
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* Reject loading the module into a second sub-interpreter: records the
   first interpreter seen (id on 3.7+, pointer otherwise) and errors on any
   later mismatch. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
  #if PY_VERSION_HEX >= 0x030700A1
  static PY_INT64_T main_interpreter_id = -1;
  PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
  if (main_interpreter_id == -1) {
    main_interpreter_id = current_id;
    return (unlikely(current_id == -1)) ? -1 : 0;
  } else if (unlikely(main_interpreter_id != current_id))
  #else
  static PyInterpreterState *main_interpreter = NULL;
  PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
  if (!main_interpreter) {
    main_interpreter = current_interpreter;
  } else if (unlikely(main_interpreter != current_interpreter))
  #endif
  {
    PyErr_SetString(
      PyExc_ImportError,
      "Interpreter change detected - this module can only be loaded into one interpreter per process.");
    return -1;
  }
  return 0;
}
/* Copy one attribute of the import spec into the module dict (e.g.
   "origin" -> "__file__"). A missing attribute is not an error; Py_None is
   skipped unless allow_none. Returns 0 on success, -1 on failure. */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
  PyObject *value = PyObject_GetAttrString(spec, from_name);
  int result = 0;
  if (likely(value)) {
    if (allow_none || value != Py_None) {
      result = PyDict_SetItemString(moddict, to_name, value);
    }
    Py_DECREF(value);
  } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
  } else {
    result = -1;
  }
  return result;
}
/* PEP 489 create slot: builds the module object from the spec, copying
   loader/origin/parent/submodule_search_locations into the module dict.
   Returns the new module, or NULL with an exception set. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
  PyObject *module = NULL, *moddict, *modname;
  if (__Pyx_check_single_interpreter())
    return NULL;
  if (__pyx_m)
    return __Pyx_NewRef(__pyx_m);
  modname = PyObject_GetAttrString(spec, "name");
  if (unlikely(!modname)) goto bad;
  module = PyModule_NewObject(modname);
  Py_DECREF(modname);
  if (unlikely(!module)) goto bad;
  moddict = PyModule_GetDict(module);
  if (unlikely(!moddict)) goto bad;
  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
  return module;
bad:
  Py_XDECREF(module);
  return NULL;
}
/* PEP 489 exec slot: runs the module body (continues below). */
static CYTHON_SMALL_CODE int __pyx_pymod_exec_fast_math(PyObject *__pyx_pyinit_module)
#endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'fast_math' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_fast_math(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if 
defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("fast_math", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_quantas__utils__math__fast_math) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "quantas.utils.math.fast_math")) { if (unlikely(PyDict_SetItemString(modules, "quantas.utils.math.fast_math", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "quantas/utils/math/fast_math.pyx":14 * from cython.parallel import prange * * import numpy as np # <<<<<<<<<<<<<< * * cdef extern from "math.h" nogil: */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":27 * @cython.boundscheck(False) * @cython.wraparound(False) * 
def multi_interpolate_array(double[::1] X, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7quantas_5utils_4math_9fast_math_1multi_interpolate_array, NULL, __pyx_n_s_quantas_utils_math_fast_math); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_multi_interpolate_array, __pyx_t_1) < 0) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":46 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_interpolate_scalar(double x, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nv = coeffs.shape[0] * cdef Py_ssize_t nc = coeffs.shape[1] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7quantas_5utils_4math_9fast_math_3multi_interpolate_scalar, NULL, __pyx_n_s_quantas_utils_math_fast_math); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_multi_interpolate_scalar, __pyx_t_1) < 0) __PYX_ERR(0, 46, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":61 * * * def multi_interpolate(X, coeffs): # <<<<<<<<<<<<<< * if type(X) != np.ndarray: * return multi_interpolate_scalar(X, coeffs) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7quantas_5utils_4math_9fast_math_5multi_interpolate, NULL, __pyx_n_s_quantas_utils_math_fast_math); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_multi_interpolate, __pyx_t_1) < 0) __PYX_ERR(0, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":70 * @cython.boundscheck(False) * @cython.wraparound(False) * def multi_R2(double[::1] X, double[:,::1] Y, double[:,::1] coeffs): # <<<<<<<<<<<<<< * cdef Py_ssize_t nx = X.shape[0] * cdef Py_ssize_t ny = Y.shape[0] */ 
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7quantas_5utils_4math_9fast_math_7multi_R2, NULL, __pyx_n_s_quantas_utils_math_fast_math); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_multi_R2, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "quantas/utils/math/fast_math.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * ############################################################################## * # Copyright (c), Gianfranco Ulian and Giovanni Valdre'. # */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = 
Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__34, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # 
<<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init quantas.utils.math.fast_math", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init quantas.utils.math.fast_math"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } 
return result; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { 
memslice->memview = NULL; } } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); 
assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, 
type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = 
(PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return 
__Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, 
PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) {
        /* continuation of __Pyx_decode_c_string(): the raw C string is too
           long to be indexed by Py_ssize_t, so conversion must fail. */
        PyErr_SetString(PyExc_OverflowError,
                        "c-string too long to convert to Python");
        return NULL;
    }
    length = (Py_ssize_t) slen;
    /* Negative start/stop are Python-style offsets from the end of the
       string; start is clamped at 0 after adjustment. */
    if (start < 0) {
        start += length;
        if (start < 0) start = 0;
    }
    if (stop < 0) stop += length;
}
length = stop - start;
/* An empty (or inverted) slice decodes to the empty unicode object. */
if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
    /* Caller supplied a codec-specific decoder (e.g. a PyUnicode_Decode*
       fast path); otherwise fall back to the generic named-codec lookup. */
    return decode_func(cstring, length, errors);
} else {
    return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Return 1 if `exc_type` (the currently-set exception type) matches any
   entry of the tuple `tuple`, else 0. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    /* First pass: cheap identity comparison before the slower
       subclass-aware checks below. */
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* Like PyErr_ExceptionMatches(), but reads curexc_type directly from the
   caller-provided thread state instead of fetching it again. */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
/* Fallback for __Pyx_GetAttr3(): swallow only AttributeError and hand the
   caller a new reference to the provided default `d`; any other pending
   exception is propagated as NULL. */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
/* Three-argument getattr(o, n, d): the attribute when present, otherwise
   the default `d` (AttributeError suppressed). */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    return (likely(r)) ?
r : __Pyx_GetAttr3Default(d); }
/* RaiseTooManyValuesToUnpack */
/* Report that an unpacking produced more values than the target expects. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Report that an unpacking ran out of values; `index` is how many were
   successfully obtained before the iterator was exhausted. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Raised when None appears where an iterable is required. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
/* Check that `obj` is an instance of extension type `type`; returns 1 on
   success, otherwise sets TypeError (or SystemError) and returns 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        /* Defensive: the type object was never initialized. */
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the exception-info stack down to the innermost frame that actually
   holds an exception, skipping entries whose exc_type is NULL or None. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Snapshot the currently *handled* exception triple into *type/*value/*tb
   as new references; callers pair this with __Pyx__ExceptionReset. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Restore a previously saved exception triple (steals the references) and
   release whatever was installed in the meantime. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value,
PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = 
tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if 
(!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return 
__Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && 
CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if 
(likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || 
__Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True;
        /* continuation of __Pyx_CLineForTraceback(): `use_cline` was just
           resolved from the cython_runtime module's attribute. */
        Py_DECREF(use_cline_obj);
    } else {
        PyErr_Clear();
        use_cline = NULL;
    }
}
if (!use_cline) {
    /* Attribute missing entirely: default to hiding C line numbers and
       cache that decision on the module so the lookup is not repeated. */
    c_line = 0;
    PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
    c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
/* Binary search over the sorted entry array: returns the index of
   `code_line` if present, otherwise the insertion point that keeps the
   array sorted. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        /* Overflow-safe midpoint. */
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up the cached PyCodeObject for `code_line`; returns a new reference,
   or NULL (without setting an exception) when the line is not cached. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cache entry for `code_line`, growing the sorted
   entry array in 64-slot steps; allocation failures are silently ignored
   because the cache is only an optimization. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insertion: allocate the initial 64-entry array. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos =
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, 
/*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; 
start = 0; } else { step = -1; start = ndim - 1; }
    /* (tail of __pyx_memviewslice_is_contig, begun on an earlier line)
       Walk dimensions innermost-first for the requested order; contiguity
       requires each stride to equal the running itemsize product and no
       indirect (suboffset >= 0) dimensions. */
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
/* Compute the half-open byte range [*out_start, *out_end) spanned by the
   slice's data.  Negative strides move the start pointer backwards; a
   zero-extent dimension yields an empty range (start == end). */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) {
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    /* end currently points at the last element; add itemsize for the
       one-past-the-end bound. */
    *out_end = end + itemsize;
}
/* Nonzero when the byte extents of the two slices intersect (standard
   half-open interval overlap test). */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) {
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/* Capsule */
/* Wrap a raw C pointer in a PyCapsule (PyCObject on Python < 2.7).
   No destructor is attached; returns NULL on failure. */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) {
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* IsLittleEndian */
/* Runtime endianness probe: on a little-endian host the lowest-addressed
   byte of 0x01020304 is 0x04. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}

/* BufferFormatCheck */
/* Initialize a buffer-format parsing context for the given dtype: the
   stack root describes the expected type, offsets start at zero, and the
   pack mode defaults to native ('@').  Definition continues on the
   following line. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 
8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } }
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct.  This will probably be the same as
   above, but we don't have any guarantees. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing padding contributed by a member of the given format character:
   sizeof(struct { T x; char c; }) - sizeof(T) is the compiler's alignment
   padding after the char, i.e. the alignment requirement of T. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
/* Map a struct-module format character to a coarse type group used for
   compatibility checks: 'H' char, 'I' signed int, 'U' unsigned int,
   'R'/'C' real/complex float, 'O' object, 'P' pointer.
   Definition continues on the following line. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
    case 'c': return 'H';
    case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p':
        return 'I';
    case 'B': case 'H': case 'I': case 'L': case 'Q':
        return 'U';
    case 'f': case 'd': case 'g':
        return (is_complex ?
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim 
!= ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int 
from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; 
return result; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, 
new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 
2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | 
(int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long 
neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if 
CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << 
PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert 
large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = 
neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned 
long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != 
rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * 
cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__34, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = 
PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 
= 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init quantas.utils.math.fast_math", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init quantas.utils.math.fast_math"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); 
#endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && 
_PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? 
(PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); 
Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { 
memslice->memview = NULL; } } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); 
assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, 
type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = 
(PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return 
__Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, 
PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, 
PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = 
tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if 
(!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return 
__Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); }
#endif
/* None */
/* Floor division with Python semantics: C's '/' truncates toward zero, so when
   the remainder is nonzero and has the opposite sign of the divisor, the
   quotient is adjusted down by one to round toward negative infinity. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long q = a / b;
    long r = a - q*b;
    /* ((r != 0) & ((r ^ b) < 0)) is 1 exactly when r and b have opposite signs */
    q -= ((r != 0) & ((r ^ b) < 0));
    return q;
}
/* WriteUnraisableException */
/* Report an exception that cannot be propagated to the caller (e.g. raised where
   no Python frame can receive it).  Saves and restores any pending error so the
   report does not clobber it; acquires the GIL first when called from nogil code. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback, CYTHON_UNUSED int nogil) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_PyThreadState_declare
#ifdef WITH_THREAD
    PyGILState_STATE state;
    if (nogil)
        state = PyGILState_Ensure();
#ifdef _MSC_VER
    /* MSVC warns about potentially-uninitialized 'state'; dummy value silences it */
    else state = (PyGILState_STATE)-1;
#endif
#endif
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* print the full traceback first; INCREFs keep the error alive so it can
           be restored and reported again below */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        /* could not build the context string; report against None instead */
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
#ifdef WITH_THREAD
    if (nogil)
        PyGILState_Release(state);
#endif
}
/* ImportFrom */
/* 'from module import name': fetch the attribute from the module object and
   convert a failed lookup (AttributeError) into the ImportError Python expects. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
#else
            "cannot import name %S", name);
#endif
    }
    return value;
}
/* HasAttr */
/* hasattr() equivalent: returns 1 if the attribute exists, 0 if the lookup
   fails (the pending error is cleared), -1 if the name is not a string.
   NOTE(review): like Py2 hasattr(), this swallows *any* lookup error, not
   just AttributeError. */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *r;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        PyErr_Clear();
        return 0;
    } else {
        Py_DECREF(r);
        return 1;
    }
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS &&
CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if 
(likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || 
__Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True;
            Py_DECREF(use_cline_obj);
        } else {
            PyErr_Clear();
            use_cline = NULL;
        }
    }
    if (!use_cline) {
        c_line = 0;
        /* cache the negative lookup result on the cython_runtime module so the
           attribute probe is not repeated on every traceback */
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif
/* CodeObjectCache */
/* Binary search over 'entries' (sorted ascending by .code_line).  Returns the
   index of the entry whose code_line equals 'code_line', or the position at
   which it would have to be inserted to keep the array sorted. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    /* fast path: beyond the last entry -> append position */
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up a cached PyCodeObject for a source line.  Returns a NEW reference,
   or NULL when code_line is 0 or the line is not cached. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) ||
        unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) a code object in the sorted cache.  The cache grows in
   steps of 64 entries; allocation failure is silently ignored (the cache is an
   optimization only).  Takes its own reference to 'code_object'. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* first insertion: allocate the initial 64-entry table */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos =
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, 
/*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; 
start = 0;
    } else {
        /* C order: scan dimensions from the last (fastest-varying) backwards */
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        /* contiguous in the given order means: no indirection (suboffsets) and
           each dimension's stride equals the accumulated element block size */
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        itemsize *= mvs.shape[index];
    }
    return 1;
}
/* OverlappingSlices */
/* Compute the [start, end) byte range spanned by a memoryview slice,
   accounting for negative strides (which make data point past the start). */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                                           void **out_start, void **out_end,
                                           int ndim, size_t itemsize) {
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            /* empty slice occupies no memory */
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;
}
/* Two slices overlap iff their byte ranges intersect. */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize) {
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}
/* Capsule */
/* Wrap a raw pointer in a PyCapsule (PyCObject on pre-2.7).  No destructor is
   attached, so the pointee's lifetime is managed by the caller. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}
/* IsLittleEndian */
/* Runtime endianness probe: write a known 32-bit pattern and inspect the first
   byte.  0x04 in byte 0 means the least significant byte comes first. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}
/* BufferFormatCheck */
/* Initialize the PEP-3118 format-string parsing context: point the stack at the
   root type, reset offsets/counters, and default both packing modes to native
   alignment ('@'). */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    /* descend through 'S' typegroup wrappers so the context starts at the first
       concrete field (presumably sub-struct indirection — grounded only in this
       loop, not in the __Pyx_TypeInfo definition, which is outside this view) */
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
/* Parse a decimal repeat count at *ts, advancing *ts past the digits.
   Returns the count, or -1 if *ts does not start with a digit. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* As ParseNumber, but raises ValueError when no number is present. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Raise ValueError for a format character the parser does not recognize. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
/* Human-readable name of a struct-module format character, for error messages. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 's': case 'p': return "a string";
    case 0: return "end";
    default: return "unparseable format string";
    }
}
/* Size in bytes of a format character in standard (packed, '=' / '<' / '>')
   mode, per the struct module's table. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ?
8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
/* Tail of __Pyx_BufFmt_CheckString (begins on earlier chunk lines):
   '(' starts an array-dimension spec; any other character must be a repeat
   count, which is parsed into ctx->new_count. */
break;
      case '(':
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default: {
        int number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        ctx->new_count = (size_t)number;
      }
    }
  }
}

/* TypeInfoCompare */
/* Deep structural equality of two __Pyx_TypeInfo descriptors: 1 if equal,
 * 0 otherwise.  Mismatched size/group/signedness/ndim is tolerated only when
 * either side is group 'H' and the sizes match; 'S' (struct) types recurse
 * field-by-field, also requiring both field lists to end together. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int i;
    if (!a || !b)
        return 0;
    if (a == b)
        return 1;
    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        if (a->typegroup == 'H' || b->typegroup == 'H') {
            /* bytes-like group: only the byte size has to agree */
            return a->size == b->size;
        } else {
            return 0;
        }
    }
    if (a->ndim) {
        /* fixed array extents must match in every dimension */
        for (i = 0; i < a->ndim; i++)
            if (a->arraysize[i] != b->arraysize[i])
                return 0;
    }
    if (a->typegroup == 'S') {
        if (a->flags != b->flags)
            return 0;
        if (a->fields || b->fields) {
            if (!(a->fields && b->fields))
                return 0;
            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
                __Pyx_StructField *field_a = a->fields + i;
                __Pyx_StructField *field_b = b->fields + i;
                if (field_a->offset != field_b->offset ||
                    !__pyx_typeinfo_cmp(field_a->type, field_b->type))
                    return 0;
            }
            /* both field arrays must be exhausted simultaneously */
            return !a->fields[i].type && !b->fields[i].type;
        }
    }
    return 1;
}

/* MemviewSliceValidateAndInit */
/* Validate the stride of buffer dimension `dim` against an axis spec
 * (contig/follow/ptr flags).  Returns 1 if acceptable, 0 after setting a
 * ValueError.  Dimensions of extent <= 1 always pass.
 * (function continues on the next chunk line) */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                /* indirect axes store pointers, so the stride is pointer-sized */
                if (buf->strides[dim] != sizeof(void *)) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (buf->strides[dim] != buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (stride < buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        if (spec & __Pyx_MEMVIEW_CONTIG && dim
!= ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int 
from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; 
/* (tail of __Pyx_PyObject_to_MemoryviewSlice_d_dc_double from the previous
   chunk line: the __pyx_fail path has already nulled the slice) */
return result;
}

/* MemviewDtypeToObject */
/* Read one double out of a memoryview item and box it as a Python float.
 * Returns a new reference (or NULL if PyFloat_FromDouble fails). */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
    return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
/* Store a Python number into a memoryview item as a double.
 * Returns 1 on success, 0 on conversion error (the -1.0 result is only an
 * error if PyErr_Occurred() confirms it).
 * NOTE(review): itemp is declared const char* yet written through after a
 * cast — generated-code quirk; callers pass writable item pointers. */
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
    double value = __pyx_PyFloat_AsDouble(obj);
    if ((value == (double)-1) && PyErr_Occurred())
        return 0;
    *(double *) itemp = value;
    return 1;
}

/* MemviewSliceCopyTemplate */
/* Allocate a new contiguous array of the given mode/dtype with the same
 * shape as *from_mvs and copy its contents into it, returning the new slice
 * (zeroed slice on failure, with a Python exception set).  Slices with
 * indirect (suboffset) dimensions cannot be copied and raise ValueError.
 * (function continues on the next chunk line) */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    for (i = 0; i < ndim; i++) {
        /* indirect dimensions would need per-element pointer chasing */
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError,
                "Cannot copy memoryview slice with "
                "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* build the shape tuple for the new array */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* PyTuple_SET_ITEM steals the reference */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs,
new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 
2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | 
(int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long 
neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if 
CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << 
PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert 
large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = 
neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned 
long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != 
rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * 
cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__34, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = 
PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 
= 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init quantas.utils.math.fast_math", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init quantas.utils.math.fast_math"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); 
#endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && 
_PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? 
(PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
        dictptr = _PyObject_GetDictPtr(obj);
#endif
    }
    /* No instance dict (or dictoffset 0) -> version 0, which never matches a cache. */
    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* Non-zero only while BOTH the type's tp_dict and the object's own __dict__
   still carry the cached version tags, i.e. neither has been mutated. */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
        return 0;
    return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
/* Look `name` up in this module's dict (__pyx_d), falling back to builtins.
   Returns a NEW reference, or NULL with an exception set. */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
    /* CPython >= 3.5: reuse the interned name's precomputed hash for the lookup. */
    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    } else if (unlikely(PyErr_Occurred())) {
        return NULL;
    }
#else
    result = PyDict_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
#endif
#else
    /* Generic mapping lookup; clear the miss error before trying builtins. */
    result = PyObject_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
    PyErr_Clear();
#endif
    return __Pyx_GetBuiltinName(name);
}
/* PyObjectCall */
/* Invoke tp_call directly (with the interpreter's recursion guard),
   bypassing the extra indirection of PyObject_Call when possible. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
    /* A slot returning NULL without setting an exception is a C-level bug;
       surface it as SystemError rather than propagating silently. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* MemviewSliceInit */
/* Fill `memviewslice` (shape/strides/suboffsets/data) from the Py_buffer held
   by `memview`.  Returns 0 on success, -1 on failure.  On the first
   acquisition a reference to `memview` is taken unless the caller already
   owns a fresh one (memview_is_new_reference). */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (memviewslice->memview || memviewslice->data) {
        PyErr_SetString(PyExc_ValueError,
            "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* Exporter gave no strides: derive C-contiguous strides from
           itemsize and shape, innermost dimension first. */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            /* -1 marks "no indirection" for this dimension. */
            memviewslice->suboffsets[i] = -1;
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...)
Py_NO_RETURN {
    va_list vargs;
    char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    /* Format into a fixed buffer and abort the interpreter; never returns. */
    vsnprintf(msg, 200, fmt, vargs);
    va_end(vargs);
    Py_FatalError(msg);
}
/* Post-increment the slice acquisition counter under `lock`;
   returns the PREVIOUS value. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}
/* Post-decrement the slice acquisition counter under `lock`;
   returns the PREVIOUS value. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return result;
}
/* Acquire the memoryview backing `memslice`.  Only the FIRST acquisition
   takes a Py_INCREF on the owner, grabbing the GIL when the caller does
   not hold it (have_gil == 0). */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return;
    /* A negative count means the refcounting invariant is already broken. */
    if (__pyx_get_slice_count(memview) < 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release a reference taken by __Pyx_INC_MEMVIEW.  The LAST release clears
   the memview pointer (Py_CLEAR), taking the GIL if necessary. */
static CYTHON_INLINE void
__Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
memslice->memview = NULL; } } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); 
assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, 
type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = 
(PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return 
__Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, 
PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, 
PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = 
tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if 
(!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return 
__Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
/* NOTE(review): Cython-generated fast path for adding a compile-time long constant to a PyLong: reads up to 4 internal digits directly (sign encoded in ob_size), assembling a native long (or long long via the long_long label) when the shift math fits in the type width minus the sign bit; otherwise defers to PyLong's nb_add. Overflow checks use unsigned arithmetic; signed ranges are guarded by the "8 * sizeof(...) - 1 > k * PyLong_SHIFT" tests. Generated code — do not hand-edit. */
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && 
/* NOTE(review): Cython-generated attribute/pickling shims for CPython < 3.7: a fast generic getattr for types without an instance dict (descriptor lookup via _PyType_Lookup, honoring tp_descr_get), a dispatching __Pyx_PyObject_GenericGetAttr, __Pyx_SetVtable storing a vtable pointer in the type dict as a capsule (PyCObject pre-2.7), and the start of __Pyx_setup_reduce, which wires __reduce_cython__/__setstate_cython__ into a type's dict unless the type defines __getstate__. Generated code — do not hand-edit. */
CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if 
(likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || 
/* NOTE(review): Cython-generated: tail of __Pyx_setup_reduce (installs __setstate__ and calls PyType_Modified; goto-based BAD/GOOD cleanup releasing all borrowed/new refs), __Pyx_CLineForTraceback which consults cython_runtime.cline_in_traceback (preserving any pending exception via ErrFetch/ErrRestore) to decide whether C line numbers appear in tracebacks, and the code-object cache's binary search (__pyx_bisect_code_objects) plus cache lookup (__pyx_find_code_object) and the start of __pyx_insert_code_object (lazy 64-entry allocation). Generated code — do not hand-edit. */
__Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = 
/* NOTE(review): Cython-generated traceback machinery: tail of __pyx_insert_code_object (replace-in-place on hit, grow-by-64 PyMem_Realloc, shift-and-insert keeping entries sorted by code_line), __Pyx_CreateCodeObjectForTraceback building a synthetic PyCodeObject whose function name embeds the C file/line when c_line is set, __Pyx_AddTraceback creating a frame and calling PyTraceBack_Here (caching code objects under -c_line or py_line), plus the Python-2 __Pyx_GetBuffer/__Pyx_ReleaseBuffer shims dispatching to the module's array/memoryview buffer slots, and the start of __pyx_memviewslice_is_contig. Generated code — do not hand-edit. */
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, 
/*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; 
/* NOTE(review): Cython-generated memoryview/buffer-format utilities: tail of __pyx_memviewslice_is_contig (walks dims in F- or C-order checking stride == running itemsize product and no suboffsets), __pyx_get_array_memory_extents/__pyx_slices_overlap (byte-range overlap test for two slices), __pyx_capsule_create, __Pyx_Is_Little_Endian (union-based runtime endianness probe), __Pyx_BufFmt_Init seeding the format-parse context/stack, and the start of the PEP-3118 parser helpers (ParseNumber, ExpectNumber, type-char describe/size tables). Generated code — do not hand-edit. */
start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; 
ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 
/* NOTE(review): Cython-generated PEP-3118 type-char tables: standard vs native sizes for each format character, and alignment/padding derived from `struct { char c; T x; }` / `struct { T x; char c; }` probe types (a portable way to measure the compiler's alignment of T without offsetof). TypeCharToGroup classifies characters into type groups ('I' signed int, 'U' unsigned, 'R'/'C' real/complex float, 'O' object, 'P' pointer, 'H' char/bytes). Generated code — do not hand-edit. */
8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return 
sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
/* NOTE(review): Cython-generated buffer-format validation core: __Pyx_BufFmt_RaiseExpected builds the "Buffer dtype mismatch" ValueError from the parse-stack position, and __Pyx_BufFmt_ProcessTypeChunk consumes one accumulated run of identical format characters — validating array dimensions, computing size (native vs standard per pack mode), applying '@'-mode alignment padding, descending into nested struct fields via the context stack, and checking that the running format offset matches each field's declared offset. Statement order here is load-bearing; kept byte-identical. Generated code — do not hand-edit. */
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
/* NOTE(review): Cython-generated PEP-3118 format-string parser: __pyx_buffmt_parse_array consumes a "(d1,d2,...)" array spec and checks each dimension against the expected TypeInfo arraysize (returns Py_None as a non-NULL success sentinel), and __Pyx_BufFmt_CheckString is the main character-by-character state machine — handling byte-order prefixes ('<','>','!','=','@','^'), nested 'T{...}' structs (recursing once per repeat count), 'x' padding, 'Z' complex prefixes, run-length accumulation of identical type chars, and ':name:' field-name skipping. Generated code — do not hand-edit. */
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim 
/* NOTE(review): generated Cython buffer/memoryview support code.  This span
 * opens mid-way through __pyx_check_strides() (inside its
 * buf->strides == NULL branch); the function's signature precedes this
 * chunk of the file. */
!= ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}

/* Validate the suboffset of dimension `dim` against the axis spec bits:
 * a DIRECT axis must not carry a usable suboffset (>= 0), while a PTR
 * (indirect) axis requires one.
 * Returns 1 on success, 0 after raising ValueError. */
static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) {
    if (spec & __Pyx_MEMVIEW_DIRECT) {
        if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    if (spec & __Pyx_MEMVIEW_PTR) {
        if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}

/* Verify overall Fortran- or C-contiguity of `buf` by accumulating the
 * expected stride across dimensions; axes of extent <= 1 are exempt from
 * the stride check (their stride is irrelevant).
 * Returns 1 if contiguous as requested, 0 after raising ValueError. */
static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) {
    int i;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: stride grows from the first dimension outward. */
        Py_ssize_t stride = 1;
        for (i = 0; i < ndim; i++) {
            if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) {
                PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: stride grows from the last dimension backward. */
        Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
            if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) {
                PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    }
    return 1;
fail:
    return 0;
}

/* Acquire `original_obj` as a memoryview (reusing it directly when it is
 * already a memoryview of the requested dtype) and validate ndim, format,
 * itemsize, strides, suboffsets and contiguity against `axes_specs` before
 * initialising `memviewslice`.  Returns 0 on success, -1 on failure.
 * (The function body continues past this span of the file.) */
static int __Pyx_ValidateAndInit_memviewslice(
        int *axes_specs, int c_or_f_flag, int buf_flags, int ndim,
        __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[],
        __Pyx_memviewslice *memviewslice, PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int
/* NOTE(review): continuation of __Pyx_ValidateAndInit_memviewslice(); its
 * signature and local declarations appear immediately before this span. */
from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    /* Reuse the object as-is when it is already a memoryview of the
     * requested dtype; otherwise wrap it in a freshly created memoryview. */
    if (from_memoryview && __pyx_typeinfo_cmp(dtype,
            ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) {
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* Only freshly wrapped buffers need their struct-format string
         * parsed and validated against the expected dtype. */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format))
            goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* Per-dimension stride/suboffset validation against the axis specs. */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    /* Whole-buffer contiguity check (only meaningful when strides exist). */
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}

/* ObjectToMemviewSlice */
/* Convert `obj` into a 1-D, direct, C-contiguous double memoryview slice.
 * Py_None passes through untouched; on failure the slice's memview/data
 * members are NULLed (the Python error is left set). */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* ObjectToMemviewSlice */
/* Same as above for a 2-D double slice whose last axis is contiguous
 * (C-contiguous "::1" on the final dimension).
 * (The tail of this function continues past this span of the file.) */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG)
    };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
/* NOTE(review): the leading `return result; }` closes
 * __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(), whose body begins
 * before this span of the file. */
    return result;
}

/* MemviewDtypeToObject */
/* Box the double stored at `itemp` as a new Python float object. */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
    return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
/* Unbox `obj` into the double slot at `itemp`.
 * Returns 1 on success, 0 if the float conversion raised
 * (-1.0 is the conversion's in-band error sentinel, hence the
 * PyErr_Occurred() disambiguation). */
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
    double value = __pyx_PyFloat_AsDouble(obj);
    if ((value == (double)-1) && PyErr_Occurred())
        return 0;
    *(double *) itemp = value;
    return 1;
}

/* MemviewSliceCopyTemplate */
/* Allocate a fresh contiguous array with the shape of `from_mvs`, wrap it
 * in a memoryview, and copy the slice contents into it.  Slices with
 * indirect (suboffset) dimensions are rejected.
 * (The tail of this function continues past this span of the file.) */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    /* Indirect dimensions cannot be flattened into a contiguous copy. */
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Cannot copy memoryview slice with "
                         "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* Build the shape tuple used to size the new backing array. */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for (i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if (unlikely(!temp_int)) {
            goto fail;
        } else {
            /* PyTuple_SET_ITEM steals the reference; clear to avoid a
             * double-decref in the fail path. */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
        (PyObject *) array_obj, contig_flag, dtype_is_object,
        from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs,
new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * 
sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 
2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | 
(int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long 
neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if 
CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << 
PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert 
large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = 
neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned 
long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != 
rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
GB_binop__minus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp32) // A*D function (colscale): GB (_AxD__minus_fp32) // D*A function (rowscale): GB (_DxB__minus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp32) // C=scalar+B GB (_bind1st__minus_fp32) // C=scalar+B' GB (_bind1st_tran__minus_fp32) // C=A+scalar GB (_bind2nd__minus_fp32) // C=A'+scalar GB (_bind2nd_tran__minus_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp32) // A*D function (colscale): GB (_AxD__minus_fp32) // D*A function (rowscale): GB (_DxB__minus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp32) // C=scalar+B GB (_bind1st__minus_fp32) // C=scalar+B' GB (_bind1st_tran__minus_fp32) // C=A+scalar GB (_bind2nd__minus_fp32) // C=A'+scalar GB (_bind2nd_tran__minus_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_fp32) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX 
(Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_fp32)
// A*D function (colscale):         GB (_AxD__minus_fp32)
// D*A function (rowscale):         GB (_DxB__minus_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_fp32)
// C=scalar+B                       GB (_bind1st__minus_fp32)
// C=scalar+B'                      GB (_bind1st_tran__minus_fp32)
// C=A+scalar                       GB (_bind2nd__minus_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_fp32)

// C type:     float
// A type:     float
// A pattern?  0
// B type:     float
// B pattern?  0

// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing '\' continues this macro onto the following
// blank line (generated-code artifact); keep the blank line after it.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same trailing-'\' artifact as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body comes entirely from the shared template; the macros above
    // specialize it to cij += aij - bij over float
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are read only for eWiseUnion; the template does not touch
    // them otherwise, so they may remain uninitialized in the plain-add case
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for MINUS (flip is handled as RMINUS elsewhere),
    // so only the #else branch below is compiled for this operator
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x - aij) ; \
}

GrB_Info GB (_bind1st_tran__minus_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition here; generated-code artifact)
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij - y) ; \
}

GrB_Info GB (_bind2nd_tran__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__times_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_01__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_uint8)
// A*D function (colscale):         GB (_AxD__times_uint8)
// D*A function (rowscale):         GB (_DxB__times_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_uint8)
// C=scalar+B                       GB (_bind1st__times_uint8)
// C=scalar+B'                      GB (_bind1st_tran__times_uint8)
// C=A+scalar                       GB (_bind2nd__times_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__times_uint8)

// C type:    uint8_t
// A type:    uint8_t
// B,b type:  uint8_t

// BinaryOp: cij = (aij * bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x * y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body comes entirely from the shared template; the macros above
    // specialize it to cij += aij * bij over uint8_t
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for TIMES (commutative), so only the #else
    // branch below is compiled for this operator
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,                // number of entries in B (name is a
                                // generator artifact; this is B's nnz)
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (x * aij) ; \
}

GrB_Info GB (_bind1st_tran__times_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition here; generated-code artifact)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (aij * y) ; \
}

GrB_Info GB (_bind2nd_tran__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint8) // A*D function (colscale): GB (_AxD__times_uint8) // D*A function (rowscale): GB (_DxB__times_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__times_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__times_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint8) // C=scalar+B GB (_bind1st__times_uint8) // C=scalar+B' GB (_bind1st_tran__times_uint8) // C=A+scalar GB (_bind2nd__times_uint8) // C=A'+scalar GB (_bind2nd_tran__times_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; 
uint8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint8) // A*D function (colscale): GB (_AxD__times_uint8) // D*A function (rowscale): GB (_DxB__times_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__times_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__times_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint8) // C=scalar+B GB (_bind1st__times_uint8) // C=scalar+B' GB (_bind1st_tran__times_uint8) // C=A+scalar GB (_bind2nd__times_uint8) // C=A'+scalar GB (_bind2nd_tran__times_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rmsprop_op.h
#pragma once #include "caffe2/core/operator.h" namespace caffe2 { template <typename Context> void rmsprop_update( int N, const float* g, const float* ms, const float* mom, float* ng, float* nms, float* nmom, float decay, float momentum, float epsilon, const float* lr, Context* context) { #pragma omp parallel for for (auto i = 0; i < N; ++i) { // Update new mean square estimate nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]); // Update momentum estimate nmom[i] = mom[i] * momentum + lr[0] * g[i] / std::sqrt(epsilon + nms[i]); // New gradient is the momentum ng[i] = nmom[i]; } } template <typename T, class Context> class RmsPropOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; RmsPropOp(const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), decay_(OperatorBase::GetSingleArgument<float>("decay", 0.9)), momentum_(OperatorBase::GetSingleArgument<float>("momentum", 0.0)), epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)) {} bool RunOnDevice() override { CAFFE_ENFORCE(Input(LR).size() == 1); CAFFE_ENFORCE(Input(GRAD).size() == Input(MEAN_SQUARES).size()); CAFFE_ENFORCE(Input(GRAD).size() == Input(OUTPUT_MOMENTUM).size()); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES)); Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM)); rmsprop_update<Context>( Input(GRAD).size(), Input(GRAD).template data<T>(), Input(MEAN_SQUARES).template data<T>(), Input(MOMENTUM).template data<T>(), Output(OUTPUT_GRAD)->template mutable_data<T>(), Output(OUTPUT_MEAN_SQUARES)->template mutable_data<T>(), Output(OUTPUT_MOMENTUM)->template mutable_data<T>(), decay_, momentum_, epsilon_, Input(LR).template data<T>(), &context_); return true; } protected: T decay_{0.9}; T momentum_{0.0}; T epsilon_{1e-8}; INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR); OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM); }; }
#pragma once #include "caffe2/core/operator.h" namespace caffe2 { template < typename Context > void rmsprop_update( int N, const float *g, const float *ms, const float *mom, float *ng, float *nms, float *nmom, float decay, float momentum, float epsilon, const float *lr, Context * context) { for (auto i = 0; i < N; ++i) { //Update new mean square estimate nms[i] = ms[i] + (1.0 f - decay) * (g[i] * g[i] - ms[i]); //Update momentum estimate nmom[i] = mom[i] * momentum + lr[0] * g[i] / std: :sqrt(epsilon + nms[i]); //New gradient is the momentum ng[i] = nmom[i]; } } template < typename T, class Context > class RmsPropOp final: public Operator < Context > { public: USE_OPERATOR_CONTEXT_FUNCTIONS; RmsPropOp(const OperatorDef & operator_def, Workspace * ws) : Operator < Context > (operator_def, ws), decay_(OperatorBase: :GetSingleArgument < float >("decay", 0.9)), momentum_(OperatorBase: :GetSingleArgument < float >("momentum", 0.0)), epsilon_(OperatorBase: :GetSingleArgument < float >("epsilon", 1e-5)) { } bool RunOnDevice() override { CAFFE_ENFORCE(Input(LR).size() == 1); CAFFE_ENFORCE(Input(GRAD).size() == Input(MEAN_SQUARES).size()); CAFFE_ENFORCE(Input(GRAD).size() == Input(OUTPUT_MOMENTUM).size()); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES)); Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM)); rmsprop_update < Context > ( Input(GRAD).size(), Input(GRAD).template data < T > (), Input(MEAN_SQUARES).template data < T > (), Input(MOMENTUM).template data < T > (), Output(OUTPUT_GRAD)->template mutable_data < T > (), Output(OUTPUT_MEAN_SQUARES)->template mutable_data < T > (), Output(OUTPUT_MOMENTUM)->template mutable_data < T > (), decay_, momentum_, epsilon_, Input(LR).template data < T > (), &context_); return true; } protected: T decay_ { 0.9 }; T momentum_ { 0.0 }; T epsilon_ { 1e-8 }; INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR); OUTPUT_TAGS(OUTPUT_GRAD, 
OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM); }; }
#pragma once #include "caffe2/core/operator.h" namespace caffe2 { template < typename Context > void rmsprop_update( int N, const float *g, const float *ms, const float *mom, float *ng, float *nms, float *nmom, float decay, float momentum, float epsilon, const float *lr, Context * context) { #pragma omp parallel for for (auto i = 0; i < N; ++i) { //Update new mean square estimate nms[i] = ms[i] + (1.0 f - decay) * (g[i] * g[i] - ms[i]); //Update momentum estimate nmom[i] = mom[i] * momentum + lr[0] * g[i] / std: :sqrt(epsilon + nms[i]); //New gradient is the momentum ng[i] = nmom[i]; } } template < typename T, class Context > class RmsPropOp final: public Operator < Context > { public: USE_OPERATOR_CONTEXT_FUNCTIONS; RmsPropOp(const OperatorDef & operator_def, Workspace * ws) : Operator < Context > (operator_def, ws), decay_(OperatorBase: :GetSingleArgument < float >("decay", 0.9)), momentum_(OperatorBase: :GetSingleArgument < float >("momentum", 0.0)), epsilon_(OperatorBase: :GetSingleArgument < float >("epsilon", 1e-5)) { } bool RunOnDevice() override { CAFFE_ENFORCE(Input(LR).size() == 1); CAFFE_ENFORCE(Input(GRAD).size() == Input(MEAN_SQUARES).size()); CAFFE_ENFORCE(Input(GRAD).size() == Input(OUTPUT_MOMENTUM).size()); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES)); Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM)); rmsprop_update < Context > ( Input(GRAD).size(), Input(GRAD).template data < T > (), Input(MEAN_SQUARES).template data < T > (), Input(MOMENTUM).template data < T > (), Output(OUTPUT_GRAD)->template mutable_data < T > (), Output(OUTPUT_MEAN_SQUARES)->template mutable_data < T > (), Output(OUTPUT_MOMENTUM)->template mutable_data < T > (), decay_, momentum_, epsilon_, Input(LR).template data < T > (), &context_); return true; } protected: T decay_ { 0.9 }; T momentum_ { 0.0 }; T epsilon_ { 1e-8 }; INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR); 
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM); }; }
base_contact_search.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED ) #define KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED // System includes // External includes // Project includes #include "processes/simple_mortar_mapper_process.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" /* Custom includes*/ #include "custom_includes/point_item.h" #include "custom_conditions/paired_condition.h" /* Tree structures */ // #include "spatial_containers/bounding_volume_tree.h" // k-DOP #include "spatial_containers/spatial_containers.h" // kd-tree namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ /// The definition of the size type typedef std::size_t SizeType; ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseContactSearch * @ingroup ContactStructuralMechanicsApplication * @brief This utilitiy has as objective to create the contact conditions. 
* @details The conditions that can be created are Mortar conditions (or segment to segment) conditions: The created conditions will be between two segments * The utility employs the projection.h from MeshingApplication, which works internally using a kd-tree * @author Vicente Mataix Ferrandiz * @tparam TDim The dimension of work * @tparam TNumNodes The number of nodes of the slave * @tparam TNumNodesMaster The number of nodes of the master */ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes> class BaseContactSearch { public: ///@name Type Definitions ///@{ /// General type definitions typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; /// Index type definition typedef std::size_t IndexType; /// Type definitions for the tree typedef PointItem PointType; typedef PointType::Pointer PointTypePointer; typedef std::vector<PointTypePointer> PointVector; typedef PointVector::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef DistanceVector::iterator DistanceIterator; /// KDtree definitions typedef Bucket< 3ul, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; typedef Tree< KDTreePartition<BucketType> > KDTree; /// The type of mapper considered typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType; /// The definition of zero tolerance static constexpr double GapThreshold = 2.0e-3; /// The definition of zero tolerance static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); /// Pointer definition of BaseContactSearch KRATOS_CLASS_POINTER_DEFINITION( BaseContactSearch ); ///@} ///@name Enum's ///@{ enum class SearchTreeType {KdtreeInRadius = 0, KdtreeInBox = 1, Kdop = 2}; enum class CheckResult {Fail = 0, AlreadyInTheMap = 1, OK = 2}; enum class CheckGap {NoCheck = 0, 
DirectCheck = 1, MappingCheck = 2}; enum class TypeSolution {NormalContactStress = 0, ScalarLagrangeMultiplier = 1, VectorLagrangeMultiplier = 2, FrictionlessPenaltyMethod = 3, FrictionalPenaltyMethod = 4}; ///@} ///@name Life Cycle ///@{ /** * @brief The constructor of the search utility uses the following inputs: * @param rMainModelPart The model part to be considered * @param ThisParameters The configuration parameters, it includes: * - The allocation considered in the search * - The factor considered to check if active or not * - The integration order considered * - The size of the bucket * - The proportion increased of the Radius/Bounding-box volume for the search * - TypeSearch: 0 means search in radius, 1 means search in box * @todo Add more types of bounding boxes, as kdops, look bounding_volume_tree.h * @note Use an InterfacePreprocess object to create such a model part from a regular one: * -# InterfaceMapper = InterfacePreprocess() * -# InterfacePart = InterfaceMapper.GenerateInterfacePart(Complete_Model_Part) */ BaseContactSearch( ModelPart& rMainModelPart, Parameters ThisParameters = Parameters(R"({})") ); virtual ~BaseContactSearch()= default;; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief This function initializes the ALM frictionless mortar conditions already created */ void InitializeMortarConditions(); /** * @brief This function clears the mortar conditions already created */ void ClearMortarConditions(); /** * @brief This method checks that the contact model part is unique (so the model parts contain unique contact pairs) */ void CheckContactModelParts(); /** * @brief This function creates a lists points ready for the Mortar method */ void CreatePointListMortar(); /** * @brief This function updates a lists points ready for the Mortar method */ void UpdatePointListMortar(); /** * @brief This function has as pourpose to find potential contact conditions and fill the mortar conditions with the necessary pointers */ void 
UpdateMortarConditions(); /** * @brief It checks the current mortar conditions */ void CheckMortarConditions(); /** * @brief It sets if the search is inverted */ void InvertSearch(); /** * @brief This resets the contact operators */ void ResetContactOperators(); ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /************************************ GET INFO *************************************/ /***********************************************************************************/ virtual std::string Info() const { return "BaseContactSearch"; } /************************************ PRINT INFO ***********************************/ /***********************************************************************************/ virtual void PrintInfo(std::ostream& rOStream) const { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ModelPart& mrMainModelPart; /// The main model part Parameters mThisParameters; /// The configuration parameters CheckGap mCheckGap; /// If the gap is checked during the search TypeSolution mTypeSolution; /// The solution type bool mInvertedSearch; /// The search will be done inverting the way master and slave/master is assigned std::string mConditionName; /// The name of the condition to be created bool mCreateAuxiliarConditions; /// If the auxiliar conditions are created or not PointVector mPointListDestination; /// A list that contents the all the points (from nodes) from the modelpart bool mMultipleSearchs; /// If we consider multiple serach or not bool mPredefinedMasterSlave; /// If the master/slave sides are predefined ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method checks the pairing * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ 
virtual void CheckPairing( ModelPart& rComputingModelPart, IndexType& rConditionId ); /** * @brief This method computes which nodes are active or inactive after after mapping the coordinates */ virtual void ComputeActiveInactiveNodes(); /** * @brief This method sets as active a node and it sets to an explicit approximation its LM * @param ItNode The node iterator to set * @param CommonEpsilon The penalty value * @param ScaleFactor The scale factor */ virtual void SetActiveNode( NodesArrayType::iterator ItNode, const double CommonEpsilon, const double ScaleFactor = 1.0 ); /** * @brief This method sets as inactive a node and it sets to zero its LM * @param ItNode The node iterator to set */ virtual void SetInactiveNode(NodesArrayType::iterator ItNode); /** * @brief This converts the framework string to an enum * @param str The string * @return CheckGap: The equivalent enum */ CheckGap ConvertCheckGap(const std::string& str); /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors */ Parameters GetDefaultParameters(); ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method sets the origin destination model maps when only one model part is provided * @details The only model part should have MASTER/SLAVE flags in the nodes and conditions * @param rModelPart The main model part, where the origin/destination model parts will be created */ void SetOriginDestinationModelParts(ModelPart& rModelPart); /** * @brief This function clears the mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearScalarMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the mortar conditions already created * @param 
rNodesArray The array of nodes to clear */ void ClearComponentsMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the ALM frictionless mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearALMFrictionlessMortarConditions(NodesArrayType& rNodesArray); /** * @brief It check the conditions if they are correctly detected * @param pIndexesPairs Set containing the ids to the conditions * @param pCond1 The pointer to the condition in the destination model part * @param pCond2 The pointer to the condition in the destination model part * @param InvertedSearch If the search is inverted * @return If OK or Fail on the check */ inline CheckResult CheckCondition( IndexMap::Pointer pIndexesPairs, const Condition::Pointer pCond1, const Condition::Pointer pCond2, const bool InvertedSearch = false ); /** * @brief This method is used in case of not predefined master/slave we assign the master/slave nodes and conditions * @param rModelPart The model part to assign the flags */ static inline void NotPredefinedMasterSlave(ModelPart& rModelPart); /** * @brief This method gets the maximum the ID of the conditions */ inline IndexType GetMaximumConditionsIds(); /** * @brief This method checks the potential pairing between two conditions/geometries * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param rPointsFound The potential pairs found * @param NumberOfPointsFound The number of potential pairs found * @param IndexesPairs The id sets of potential pairs */ inline void AddPotentialPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, PointVector& rPointsFound, const IndexType NumberOfPointsFound, IndexMap::Pointer IndexesPairs ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The 
modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition * @param IndexesPairs The map of indexes considered */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster, IndexMap::Pointer IndexesPairs ); /** * @brief This method computes the gap using a mapper * @param SearchOrientation The orientation of the search (inverted or not) */ inline void ComputeMappedGap(const bool SearchOrientation); /** * @brief This method sets as inactive a node and it sets to zero its LM */ inline void ComputeWeightedReaction(); /** * @brief This method switchs the flag of an array of nodes * @param rNodes The set of nodes where the flags are reset */ static inline void SwitchFlagNodes(NodesArrayType& rNodes) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) { auto it_node = rNodes.begin() + i; it_node->Flip(SLAVE); it_node->Flip(MASTER); } } /** * @brief This method creates the auxiliar the pairing * @param rContactModelPart The modelpart used in the assemble of the system * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ inline void CreateAuxiliarConditions( ModelPart& rContactModelPart, ModelPart& rComputingModelPart, IndexType& rConditionId ); 
/** * @brief Calculates the minimal distance between one node and its center * @return The radius of the geometry */ static inline double Radius(GeometryType& ThisGeometry); /** * @brief This converts the framework string to an enum * @param str The string * @return SearchTreeType: The equivalent enum */ SearchTreeType ConvertSearchTree(const std::string& str); ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class BaseContactSearch ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ // /****************************** INPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::istream& operator >> (std::istream& rIStream, // BaseContactSearch& rThis); // // /***************************** OUTPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::ostream& operator << (std::ostream& rOStream, // const BaseContactSearch& rThis) // { // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED defined
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED ) #define KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED // System includes // External includes // Project includes #include "processes/simple_mortar_mapper_process.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" /* Custom includes*/ #include "custom_includes/point_item.h" #include "custom_conditions/paired_condition.h" /* Tree structures */ // #include "spatial_containers/bounding_volume_tree.h" // k-DOP #include "spatial_containers/spatial_containers.h" // kd-tree namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ /// The definition of the size type typedef std::size_t SizeType; ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseContactSearch * @ingroup ContactStructuralMechanicsApplication * @brief This utilitiy has as objective to create the contact conditions. 
* @details The conditions that can be created are Mortar conditions (or segment to segment) conditions: The created conditions will be between two segments * The utility employs the projection.h from MeshingApplication, which works internally using a kd-tree * @author Vicente Mataix Ferrandiz * @tparam TDim The dimension of work * @tparam TNumNodes The number of nodes of the slave * @tparam TNumNodesMaster The number of nodes of the master */ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes> class BaseContactSearch { public: ///@name Type Definitions ///@{ /// General type definitions typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; /// Index type definition typedef std::size_t IndexType; /// Type definitions for the tree typedef PointItem PointType; typedef PointType::Pointer PointTypePointer; typedef std::vector<PointTypePointer> PointVector; typedef PointVector::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef DistanceVector::iterator DistanceIterator; /// KDtree definitions typedef Bucket< 3ul, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; typedef Tree< KDTreePartition<BucketType> > KDTree; /// The type of mapper considered typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType; /// The definition of zero tolerance static constexpr double GapThreshold = 2.0e-3; /// The definition of zero tolerance static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); /// Pointer definition of BaseContactSearch KRATOS_CLASS_POINTER_DEFINITION( BaseContactSearch ); ///@} ///@name Enum's ///@{ enum class SearchTreeType {KdtreeInRadius = 0, KdtreeInBox = 1, Kdop = 2}; enum class CheckResult {Fail = 0, AlreadyInTheMap = 1, OK = 2}; enum class CheckGap {NoCheck = 0, 
DirectCheck = 1, MappingCheck = 2}; enum class TypeSolution {NormalContactStress = 0, ScalarLagrangeMultiplier = 1, VectorLagrangeMultiplier = 2, FrictionlessPenaltyMethod = 3, FrictionalPenaltyMethod = 4}; ///@} ///@name Life Cycle ///@{ /** * @brief The constructor of the search utility uses the following inputs: * @param rMainModelPart The model part to be considered * @param ThisParameters The configuration parameters, it includes: * - The allocation considered in the search * - The factor considered to check if active or not * - The integration order considered * - The size of the bucket * - The proportion increased of the Radius/Bounding-box volume for the search * - TypeSearch: 0 means search in radius, 1 means search in box * @todo Add more types of bounding boxes, as kdops, look bounding_volume_tree.h * @note Use an InterfacePreprocess object to create such a model part from a regular one: * -# InterfaceMapper = InterfacePreprocess() * -# InterfacePart = InterfaceMapper.GenerateInterfacePart(Complete_Model_Part) */ BaseContactSearch( ModelPart& rMainModelPart, Parameters ThisParameters = Parameters(R"({})") ); virtual ~BaseContactSearch()= default;; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief This function initializes the ALM frictionless mortar conditions already created */ void InitializeMortarConditions(); /** * @brief This function clears the mortar conditions already created */ void ClearMortarConditions(); /** * @brief This method checks that the contact model part is unique (so the model parts contain unique contact pairs) */ void CheckContactModelParts(); /** * @brief This function creates a lists points ready for the Mortar method */ void CreatePointListMortar(); /** * @brief This function updates a lists points ready for the Mortar method */ void UpdatePointListMortar(); /** * @brief This function has as pourpose to find potential contact conditions and fill the mortar conditions with the necessary pointers */ void 
UpdateMortarConditions(); /** * @brief It checks the current mortar conditions */ void CheckMortarConditions(); /** * @brief It sets if the search is inverted */ void InvertSearch(); /** * @brief This resets the contact operators */ void ResetContactOperators(); ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /************************************ GET INFO *************************************/ /***********************************************************************************/ virtual std::string Info() const { return "BaseContactSearch"; } /************************************ PRINT INFO ***********************************/ /***********************************************************************************/ virtual void PrintInfo(std::ostream& rOStream) const { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ModelPart& mrMainModelPart; /// The main model part Parameters mThisParameters; /// The configuration parameters CheckGap mCheckGap; /// If the gap is checked during the search TypeSolution mTypeSolution; /// The solution type bool mInvertedSearch; /// The search will be done inverting the way master and slave/master is assigned std::string mConditionName; /// The name of the condition to be created bool mCreateAuxiliarConditions; /// If the auxiliar conditions are created or not PointVector mPointListDestination; /// A list that contents the all the points (from nodes) from the modelpart bool mMultipleSearchs; /// If we consider multiple serach or not bool mPredefinedMasterSlave; /// If the master/slave sides are predefined ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method checks the pairing * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ 
virtual void CheckPairing( ModelPart& rComputingModelPart, IndexType& rConditionId ); /** * @brief This method computes which nodes are active or inactive after after mapping the coordinates */ virtual void ComputeActiveInactiveNodes(); /** * @brief This method sets as active a node and it sets to an explicit approximation its LM * @param ItNode The node iterator to set * @param CommonEpsilon The penalty value * @param ScaleFactor The scale factor */ virtual void SetActiveNode( NodesArrayType::iterator ItNode, const double CommonEpsilon, const double ScaleFactor = 1.0 ); /** * @brief This method sets as inactive a node and it sets to zero its LM * @param ItNode The node iterator to set */ virtual void SetInactiveNode(NodesArrayType::iterator ItNode); /** * @brief This converts the framework string to an enum * @param str The string * @return CheckGap: The equivalent enum */ CheckGap ConvertCheckGap(const std::string& str); /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors */ Parameters GetDefaultParameters(); ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method sets the origin destination model maps when only one model part is provided * @details The only model part should have MASTER/SLAVE flags in the nodes and conditions * @param rModelPart The main model part, where the origin/destination model parts will be created */ void SetOriginDestinationModelParts(ModelPart& rModelPart); /** * @brief This function clears the mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearScalarMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the mortar conditions already created * @param 
rNodesArray The array of nodes to clear */ void ClearComponentsMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the ALM frictionless mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearALMFrictionlessMortarConditions(NodesArrayType& rNodesArray); /** * @brief It check the conditions if they are correctly detected * @param pIndexesPairs Set containing the ids to the conditions * @param pCond1 The pointer to the condition in the destination model part * @param pCond2 The pointer to the condition in the destination model part * @param InvertedSearch If the search is inverted * @return If OK or Fail on the check */ inline CheckResult CheckCondition( IndexMap::Pointer pIndexesPairs, const Condition::Pointer pCond1, const Condition::Pointer pCond2, const bool InvertedSearch = false ); /** * @brief This method is used in case of not predefined master/slave we assign the master/slave nodes and conditions * @param rModelPart The model part to assign the flags */ static inline void NotPredefinedMasterSlave(ModelPart& rModelPart); /** * @brief This method gets the maximum the ID of the conditions */ inline IndexType GetMaximumConditionsIds(); /** * @brief This method checks the potential pairing between two conditions/geometries * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param rPointsFound The potential pairs found * @param NumberOfPointsFound The number of potential pairs found * @param IndexesPairs The id sets of potential pairs */ inline void AddPotentialPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, PointVector& rPointsFound, const IndexType NumberOfPointsFound, IndexMap::Pointer IndexesPairs ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The 
modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition * @param IndexesPairs The map of indexes considered */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster, IndexMap::Pointer IndexesPairs ); /** * @brief This method computes the gap using a mapper * @param SearchOrientation The orientation of the search (inverted or not) */ inline void ComputeMappedGap(const bool SearchOrientation); /** * @brief This method sets as inactive a node and it sets to zero its LM */ inline void ComputeWeightedReaction(); /** * @brief This method switchs the flag of an array of nodes * @param rNodes The set of nodes where the flags are reset */ static inline void SwitchFlagNodes(NodesArrayType& rNodes) { for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) { auto it_node = rNodes.begin() + i; it_node->Flip(SLAVE); it_node->Flip(MASTER); } } /** * @brief This method creates the auxiliar the pairing * @param rContactModelPart The modelpart used in the assemble of the system * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ inline void CreateAuxiliarConditions( ModelPart& rContactModelPart, ModelPart& rComputingModelPart, IndexType& rConditionId ); /** * @brief Calculates 
the minimal distance between one node and its center * @return The radius of the geometry */ static inline double Radius(GeometryType& ThisGeometry); /** * @brief This converts the framework string to an enum * @param str The string * @return SearchTreeType: The equivalent enum */ SearchTreeType ConvertSearchTree(const std::string& str); ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class BaseContactSearch ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ // /****************************** INPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::istream& operator >> (std::istream& rIStream, // BaseContactSearch& rThis); // // /***************************** OUTPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::ostream& operator << (std::ostream& rOStream, // const BaseContactSearch& rThis) // { // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED defined
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED ) #define KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED // System includes // External includes // Project includes #include "processes/simple_mortar_mapper_process.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" /* Custom includes*/ #include "custom_includes/point_item.h" #include "custom_conditions/paired_condition.h" /* Tree structures */ // #include "spatial_containers/bounding_volume_tree.h" // k-DOP #include "spatial_containers/spatial_containers.h" // kd-tree namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ /// The definition of the size type typedef std::size_t SizeType; ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseContactSearch * @ingroup ContactStructuralMechanicsApplication * @brief This utilitiy has as objective to create the contact conditions. 
* @details The conditions that can be created are Mortar conditions (or segment to segment) conditions: The created conditions will be between two segments * The utility employs the projection.h from MeshingApplication, which works internally using a kd-tree * @author Vicente Mataix Ferrandiz * @tparam TDim The dimension of work * @tparam TNumNodes The number of nodes of the slave * @tparam TNumNodesMaster The number of nodes of the master */ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes> class BaseContactSearch { public: ///@name Type Definitions ///@{ /// General type definitions typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; /// Index type definition typedef std::size_t IndexType; /// Type definitions for the tree typedef PointItem PointType; typedef PointType::Pointer PointTypePointer; typedef std::vector<PointTypePointer> PointVector; typedef PointVector::iterator PointIterator; typedef std::vector<double> DistanceVector; typedef DistanceVector::iterator DistanceIterator; /// KDtree definitions typedef Bucket< 3ul, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType; typedef Tree< KDTreePartition<BucketType> > KDTree; /// The type of mapper considered typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType; /// The definition of zero tolerance static constexpr double GapThreshold = 2.0e-3; /// The definition of zero tolerance static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); /// Pointer definition of BaseContactSearch KRATOS_CLASS_POINTER_DEFINITION( BaseContactSearch ); ///@} ///@name Enum's ///@{ enum class SearchTreeType {KdtreeInRadius = 0, KdtreeInBox = 1, Kdop = 2}; enum class CheckResult {Fail = 0, AlreadyInTheMap = 1, OK = 2}; enum class CheckGap {NoCheck = 0, 
DirectCheck = 1, MappingCheck = 2}; enum class TypeSolution {NormalContactStress = 0, ScalarLagrangeMultiplier = 1, VectorLagrangeMultiplier = 2, FrictionlessPenaltyMethod = 3, FrictionalPenaltyMethod = 4}; ///@} ///@name Life Cycle ///@{ /** * @brief The constructor of the search utility uses the following inputs: * @param rMainModelPart The model part to be considered * @param ThisParameters The configuration parameters, it includes: * - The allocation considered in the search * - The factor considered to check if active or not * - The integration order considered * - The size of the bucket * - The proportion increased of the Radius/Bounding-box volume for the search * - TypeSearch: 0 means search in radius, 1 means search in box * @todo Add more types of bounding boxes, as kdops, look bounding_volume_tree.h * @note Use an InterfacePreprocess object to create such a model part from a regular one: * -# InterfaceMapper = InterfacePreprocess() * -# InterfacePart = InterfaceMapper.GenerateInterfacePart(Complete_Model_Part) */ BaseContactSearch( ModelPart& rMainModelPart, Parameters ThisParameters = Parameters(R"({})") ); virtual ~BaseContactSearch()= default;; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief This function initializes the ALM frictionless mortar conditions already created */ void InitializeMortarConditions(); /** * @brief This function clears the mortar conditions already created */ void ClearMortarConditions(); /** * @brief This method checks that the contact model part is unique (so the model parts contain unique contact pairs) */ void CheckContactModelParts(); /** * @brief This function creates a lists points ready for the Mortar method */ void CreatePointListMortar(); /** * @brief This function updates a lists points ready for the Mortar method */ void UpdatePointListMortar(); /** * @brief This function has as pourpose to find potential contact conditions and fill the mortar conditions with the necessary pointers */ void 
UpdateMortarConditions(); /** * @brief It checks the current mortar conditions */ void CheckMortarConditions(); /** * @brief It sets if the search is inverted */ void InvertSearch(); /** * @brief This resets the contact operators */ void ResetContactOperators(); ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /************************************ GET INFO *************************************/ /***********************************************************************************/ virtual std::string Info() const { return "BaseContactSearch"; } /************************************ PRINT INFO ***********************************/ /***********************************************************************************/ virtual void PrintInfo(std::ostream& rOStream) const { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ModelPart& mrMainModelPart; /// The main model part Parameters mThisParameters; /// The configuration parameters CheckGap mCheckGap; /// If the gap is checked during the search TypeSolution mTypeSolution; /// The solution type bool mInvertedSearch; /// The search will be done inverting the way master and slave/master is assigned std::string mConditionName; /// The name of the condition to be created bool mCreateAuxiliarConditions; /// If the auxiliar conditions are created or not PointVector mPointListDestination; /// A list that contents the all the points (from nodes) from the modelpart bool mMultipleSearchs; /// If we consider multiple serach or not bool mPredefinedMasterSlave; /// If the master/slave sides are predefined ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method checks the pairing * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ 
virtual void CheckPairing( ModelPart& rComputingModelPart, IndexType& rConditionId ); /** * @brief This method computes which nodes are active or inactive after after mapping the coordinates */ virtual void ComputeActiveInactiveNodes(); /** * @brief This method sets as active a node and it sets to an explicit approximation its LM * @param ItNode The node iterator to set * @param CommonEpsilon The penalty value * @param ScaleFactor The scale factor */ virtual void SetActiveNode( NodesArrayType::iterator ItNode, const double CommonEpsilon, const double ScaleFactor = 1.0 ); /** * @brief This method sets as inactive a node and it sets to zero its LM * @param ItNode The node iterator to set */ virtual void SetInactiveNode(NodesArrayType::iterator ItNode); /** * @brief This converts the framework string to an enum * @param str The string * @return CheckGap: The equivalent enum */ CheckGap ConvertCheckGap(const std::string& str); /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors */ Parameters GetDefaultParameters(); ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method sets the origin destination model maps when only one model part is provided * @details The only model part should have MASTER/SLAVE flags in the nodes and conditions * @param rModelPart The main model part, where the origin/destination model parts will be created */ void SetOriginDestinationModelParts(ModelPart& rModelPart); /** * @brief This function clears the mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearScalarMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the mortar conditions already created * @param 
rNodesArray The array of nodes to clear */ void ClearComponentsMortarConditions(NodesArrayType& rNodesArray); /** * @brief This function clears the ALM frictionless mortar conditions already created * @param rNodesArray The array of nodes to clear */ void ClearALMFrictionlessMortarConditions(NodesArrayType& rNodesArray); /** * @brief It check the conditions if they are correctly detected * @param pIndexesPairs Set containing the ids to the conditions * @param pCond1 The pointer to the condition in the destination model part * @param pCond2 The pointer to the condition in the destination model part * @param InvertedSearch If the search is inverted * @return If OK or Fail on the check */ inline CheckResult CheckCondition( IndexMap::Pointer pIndexesPairs, const Condition::Pointer pCond1, const Condition::Pointer pCond2, const bool InvertedSearch = false ); /** * @brief This method is used in case of not predefined master/slave we assign the master/slave nodes and conditions * @param rModelPart The model part to assign the flags */ static inline void NotPredefinedMasterSlave(ModelPart& rModelPart); /** * @brief This method gets the maximum the ID of the conditions */ inline IndexType GetMaximumConditionsIds(); /** * @brief This method checks the potential pairing between two conditions/geometries * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param rPointsFound The potential pairs found * @param NumberOfPointsFound The number of potential pairs found * @param IndexesPairs The id sets of potential pairs */ inline void AddPotentialPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, PointVector& rPointsFound, const IndexType NumberOfPointsFound, IndexMap::Pointer IndexesPairs ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The 
modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster ); /** * @brief This method add a new pair to the computing model part * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created * @param pCondSlave The pointer to the slave condition * @param pCondMaster The pointer to the master condition * @param IndexesPairs The map of indexes considered */ inline void AddPairing( ModelPart& rComputingModelPart, IndexType& rConditionId, Condition::Pointer pCondSlave, Condition::Pointer pCondMaster, IndexMap::Pointer IndexesPairs ); /** * @brief This method computes the gap using a mapper * @param SearchOrientation The orientation of the search (inverted or not) */ inline void ComputeMappedGap(const bool SearchOrientation); /** * @brief This method sets as inactive a node and it sets to zero its LM */ inline void ComputeWeightedReaction(); /** * @brief This method switchs the flag of an array of nodes * @param rNodes The set of nodes where the flags are reset */ static inline void SwitchFlagNodes(NodesArrayType& rNodes) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) { auto it_node = rNodes.begin() + i; it_node->Flip(SLAVE); it_node->Flip(MASTER); } } /** * @brief This method creates the auxiliar the pairing * @param rContactModelPart The modelpart used in the assemble of the system * @param rComputingModelPart The modelpart used in the assemble of the system * @param rConditionId The ID of the new condition to be created */ inline void CreateAuxiliarConditions( ModelPart& rContactModelPart, ModelPart& rComputingModelPart, IndexType& rConditionId ); 
/** * @brief Calculates the minimal distance between one node and its center * @return The radius of the geometry */ static inline double Radius(GeometryType& ThisGeometry); /** * @brief This converts the framework string to an enum * @param str The string * @return SearchTreeType: The equivalent enum */ SearchTreeType ConvertSearchTree(const std::string& str); ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class BaseContactSearch ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ // /****************************** INPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::istream& operator >> (std::istream& rIStream, // BaseContactSearch& rThis); // // /***************************** OUTPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::ostream& operator << (std::ostream& rOStream, // const BaseContactSearch& rThis) // { // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_BASE_CONTACT_SEARCH_H_INCLUDED defined
GB_binop__iseq_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this file instantiates the ISEQ operator for the FC32
// (single-precision complex) type; all loop bodies come from the shared
// template files included below, so edits belong in Generator/, not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__iseq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__iseq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_fc32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_fc32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_fc32)
// C=scalar+B                       GB (_bind1st__iseq_fc32)
// C=scalar+B'                      GB (_bind1st_tran__iseq_fc32)
// C=A+scalar                       GB (_bind2nd__iseq_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// A pattern?  0
// B type:   GxB_FC32_t
// B pattern?  0

// BinaryOp: cij = GB_FC32_iseq (aij, bij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after "0" splices the following blank
// line into the macro; harmless, but do not insert text on that line.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
// NOTE(review): ISEQ-family ops return the operand type (here GxB_FC32_t),
// not bool -- presumably 1 or 0 encoded as complex; confirm in GB.h.
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_iseq (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_FC32 || GxB_NO_ISEQ_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__iseq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is 1 for full matrices)
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_iseq (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is 1 for full matrices)
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_iseq (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_iseq (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__iseq_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use (same definition here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_iseq (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__iseq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc32) // C=scalar+B GB (_bind1st__iseq_fc32) // C=scalar+B' GB (_bind1st_tran__iseq_fc32) // C=A+scalar GB (_bind2nd__iseq_fc32) // C=A'+scalar GB (_bind2nd_tran__iseq_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 
0 // BinaryOp: cij = GB_FC32_iseq (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_iseq (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_FC32 || GxB_NO_ISEQ_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t 
*restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_fc32) ( GrB_Matrix C, 
const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) 
continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_iseq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_iseq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_iseq (x, aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_iseq (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc32) // C=scalar+B GB (_bind1st__iseq_fc32) // C=scalar+B' GB (_bind1st_tran__iseq_fc32) // C=A+scalar GB (_bind2nd__iseq_fc32) // C=A'+scalar GB (_bind2nd_tran__iseq_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 
0 // BinaryOp: cij = GB_FC32_iseq (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_iseq (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_FC32 || GxB_NO_ISEQ_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t 
*restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_fc32) ( GrB_Matrix C, 
const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_iseq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_iseq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_iseq (x, aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_iseq (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 1 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 1 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int 
coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < 
Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; 
MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); 
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 1 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
/* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t 
= 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k] - A[(t + 1) % 2][i][j][k] + roc2[i][j][k] * ( coef0 * A[t % 2][i][j][k] + coef1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) + coef2 * (A[t % 2][i - 2][j][k] + A[t % 2][i + 2][j][k] + A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k] + A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) + coef3 * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k] + A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k] + A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) + coef4 * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k] + A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k] + A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4])); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
/* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < 
TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k] - A[(t + 1) % 2][i][j][k] + roc2[i][j][k] * ( coef0 * A[t % 2][i][j][k] + coef1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) + coef2 * (A[t % 2][i - 2][j][k] + A[t % 2][i + 2][j][k] + A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k] + A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) + coef3 * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k] + A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k] + A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) + coef4 * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k] + A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k] + A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4])); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__first_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_01__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint64) // A*D function (colscale): GB (_AxD__first_uint64) // D*A function (rowscale): GB (_DxB__first_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__first_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__first_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = aij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax 
[pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_UINT64 || GxB_NO_FIRST_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, 
bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__first_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__first_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_01__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__first_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint64) // A*D function (colscale): GB (_AxD__first_uint64) // D*A function (rowscale): GB (_DxB__first_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__first_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__first_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = aij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax 
[pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_UINT64 || GxB_NO_FIRST_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, 
bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__first_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__first_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; 
; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this file specializes every binary-op kernel for the FIRST
// operator on uint64_t: GB_BINOP below is z = x (cij = aij), so the second
// operand is never read (GB_GETB expands to an empty statement).  All kernel
// bodies are brought in via #include'd template files; the macros defined
// here parameterize those templates.  Functions wrapped in "#if 0" are
// variants this operator does not provide (listed as "GB ((none))" below).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_01__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_03__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_uint64)
// A*D function (colscale):         GB (_AxD__first_uint64)
// D*A function (rowscale):         GB (_DxB__first_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_uint64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB] -- intentionally empty: FIRST never reads its second operand
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x (the y, i, j arguments are ignored)
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_UINT64 || GxB_NO_FIRST_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (FIRST is none of these, so this variant is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // Returns GrB_NO_VALUE when this specialization is disabled at compile
    // time (GB_DISABLE), so the caller falls back to the generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The template body is compiled out for this operator ("#if 0");
    // the function still reports success so dispatch tables stay uniform.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Template body compiled out for this operator; see note above.
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Workspace declared here is consumed by the template and released by
    // GB_FREE_WORK before returning.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__first_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for FIRST, so only the unflipped branch compiles.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0
// Not generated for FIRST (first(x,bij) with bound x is the ANY/constant
// case handled elsewhere); kept for reference, compiled out.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        ; ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
// Not generated for FIRST; compiled out.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = x ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                     \
{                                             \
    uint64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = aij ;                           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

#endif  // GBCOMPACT
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
// PLUTO helper macros: integer-valued ceiling/floor of n/d (computed in
// double), plus lowercase min/max used by the generated loop bounds.
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): *y is modified in place by the carry normalization below.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

// Driver: allocates a double-buffered grid A[2][Nz][Ny][Nx], runs the
// PLUTO-tiled 7-point stencil sweep TESTS times, and reports per-test and
// best-of timings.
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // Grid sizes come from argv, +2 for one ghost layer on each side.
  // NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer than
  // 4 (resp. 5) arguments are supplied -- all four sizes are required.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // Two time levels (double buffering), each a Nz x Ny x Nx pointer grid.
  // Allocation results are not checked (benchmark code).
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (4,4,8,32 matches the 4/8/32 tile strides in the generated loop nest).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  srand(42);
  // NOTE(review): interior points start at index 1; index 0 (ghost layer)
  // is never initialized here.
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* (Stray glibc <features.h>-style license/feature-test boilerplate left
       here by the source-to-source tool; comment only, no effect.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    // Time-tiled wavefront schedule generated by PLUTO/CLooG: t1 walks the
    // wavefronts, t2 (the OpenMP-parallel loop) tiles z, t3/t4 tile y/x,
    // t5 is the time step, and t6/t7/t8 are the intra-tile z/y/x point
    // loops (spatial indices are shifted by -t5 due to time skewing).
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    // 7-point update: write the (t5+1)%2 buffer from the
                    // t5%2 buffer (center plus its 6 face neighbors).
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    // NOTE(review): ts_return is assigned but never checked.
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  // Reporting macro from print_utils.h; presumably consumes min_tdiff /
  // num_threads -- TODO confirm against that header.
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */

  return 0;
}
#include <omp.h>
#include <math.h>
// PLUTO helper macros: integer-valued ceiling/floor of n/d, and min/max.
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 *
 * (Serial variant: same tiled loop nest as the OpenMP version, but with no
 * "#pragma omp parallel for" on the t2 tile loop.)
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): *y is modified in place by the carry normalization below.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

// Driver: allocates a double-buffered grid A[2][Nz][Ny][Nx], runs the
// PLUTO-tiled 7-point stencil sweep TESTS times (serially), and reports
// timings.
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // Grid sizes from argv, +2 for one ghost layer on each side.
  // NOTE(review): Nx/Ny/Nz/Nt stay uninitialized on short argv -- all four
  // size arguments are required.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // Two time levels (double buffering); allocations unchecked (benchmark).
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  LIKWID_MARKER_THREADINIT;
  LIKWID_MARKER_START("calc");
#endif

  int num_threads = 1;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* (Stray glibc <features.h>-style license/feature-test boilerplate left
       here by the source-to-source tool; comment only, no effect.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    // Time-tiled wavefront schedule generated by PLUTO/CLooG: t1 walks the
    // wavefronts, t2 tiles z, t3/t4 tile y/x, t5 is the time step, and
    // t6/t7/t8 are the intra-tile z/y/x point loops (shifted by -t5).
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    // 7-point update: write the (t5+1)%2 buffer from the
                    // t5%2 buffer (center plus its 6 face neighbors).
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    // NOTE(review): ts_return is assigned but never checked.
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  // Reporting macro from print_utils.h; presumably consumes min_tdiff /
  // num_threads -- TODO confirm against that header.
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_STOP("calc");
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */

  return 0;
}
#include <omp.h>
#include <math.h>
// PLUTO helper macros: integer-valued ceiling/floor of n/d, and min/max.
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): *y is modified in place by the carry normalization below.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

// Driver: allocates a double-buffered grid A[2][Nz][Ny][Nx], runs the
// PLUTO-tiled 7-point stencil sweep TESTS times (t2 tile loop is
// OpenMP-parallel), and reports timings.
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // Grid sizes from argv, +2 for one ghost layer on each side.
  // NOTE(review): Nx/Ny/Nz/Nt stay uninitialized on short argv -- all four
  // size arguments are required.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // Two time levels (double buffering); allocations unchecked (benchmark).
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (4,4,8,32 matches the 4/8/32 tile strides in the generated loop nest).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* (Stray glibc <features.h>-style license/feature-test boilerplate left
       here by the source-to-source tool; comment only, no effect.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    // Time-tiled wavefront schedule generated by PLUTO/CLooG: t1 walks the
    // wavefronts, t2 (OpenMP-parallel) tiles z, t3/t4 tile y/x, t5 is the
    // time step, t6/t7/t8 are the intra-tile z/y/x point loops (shifted
    // by -t5 due to time skewing).
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    // 7-point update: write the (t5+1)%2 buffer from the
                    // t5%2 buffer (center plus its 6 face neighbors).
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    // NOTE(review): ts_return is assigned but never checked.
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  // Reporting macro from print_utils.h; presumably consumes min_tdiff /
  // num_threads -- TODO confirm against that header.
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */

  return 0;
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(8*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(8*t3+Nx-5,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 
% 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 
1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. * * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(16 * t2 - Nz + 5, 8)), t1), 2 * t1 - 2 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(8 * t1 + Ny + 7, 8)), floord(16 * t2 + Ny + 3, 8)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 8)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(16 * t2 - Nz - 1011, 1024)), ceild(8 * t3 - Ny - 1011, 1024)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 1024), floord(8 * t1 + Nx + 7, 1024)), floord(16 * t2 + Nx + 3, 1024)), floord(8 * t3 + Nx - 5, 1024)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 1024)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), 2 * t3), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), 256 * t4 + 254); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((2.0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) - A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (roc2[(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (((((coef0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef1 * (((((A[t5 % 2][(-4 * t5 + t6) - 1][(-4 
* t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef2 * (((((A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef3 * (((((A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef4 * (((((A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { 
for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < 
TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. * * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(16 * t2 - Nz + 5, 8)), t1), 2 * t1 - 2 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(8 * t1 + Ny + 7, 8)), floord(16 * t2 + Ny + 3, 8)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 8)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(16 * t2 - Nz - 1011, 1024)), ceild(8 * t3 - Ny - 1011, 1024)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 1024), floord(8 * t1 + Nx + 7, 1024)), floord(16 * t2 + Nx + 3, 1024)), floord(8 * t3 + Nx - 5, 1024)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 1024)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), 2 * t3), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), 256 * t4 + 254); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((2.0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) - A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (roc2[(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (((((coef0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * 
t5 + t8)]) + (coef1 * (((((A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef2 * (((((A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef3 * (((((A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef4 * (((((A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { 
LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
perfect_number.c
/* vim: set ft=c sw=4 ts=4: */ /* perfect_number.c * Perfect number calculation in C */ #include "perfect_number.h" #include <stdio.h> #include "dynarr/DG_dynarr.h" /** perfect number predicate */ bool is_perfect(unsigned n) { unsigned sum = 0; for (unsigned i = 1; i < n; i++) { if (n % i == 0) sum += i; } return (sum == n); } /** find perfect number < limit */ unsigned perfect_numbers(PerfectNumbers *pn, unsigned limit) { da_init(*pn); unsigned i; #pragma omp parallel for private(i), schedule(dynamic) for (i = 1; i < limit; i++) { if (is_perfect(i)) { da_push(*pn, i); } } return da_count(*pn); } /** pretty-print perfect numbers */ void print_perfect_numbers(PerfectNumbers const *const pn) { unsigned len = da_count(*pn); printf("{"); if (len > 0) printf(" %d", pn->p[0]); for (unsigned i = 1; i < len; i++) { printf(", %d", pn->p[i]); } printf(" }"); }
/* vim: set ft=c sw=4 ts=4: */ /* * perfect_number.c Perfect number calculation in C */ #include "perfect_number.h" #include <stdio.h> #include "dynarr/DG_dynarr.h" /** perfect number predicate */ bool is_perfect(unsigned n) { unsigned sum = 0; for (unsigned i = 1; i < n; i++) { if (n % i == 0) sum += i; } return (sum == n); } /** find perfect number < limit */ unsigned perfect_numbers(PerfectNumbers * pn, unsigned limit) { da_init(*pn); unsigned i; for (i = 1; i < limit; i++) { if (is_perfect(i)) { da_push(*pn, i); } } return da_count(*pn); } /** pretty-print perfect numbers */ void print_perfect_numbers(PerfectNumbers const *const pn) { unsigned len = da_count(*pn); printf("{"); if (len > 0) printf(" %d", pn->p[0]); for (unsigned i = 1; i < len; i++) { printf(", %d", pn->p[i]); } printf(" }"); }
/* vim: set ft=c sw=4 ts=4: */ /* * perfect_number.c Perfect number calculation in C */ #include "perfect_number.h" #include <stdio.h> #include "dynarr/DG_dynarr.h" /** perfect number predicate */ bool is_perfect(unsigned n) { unsigned sum = 0; for (unsigned i = 1; i < n; i++) { if (n % i == 0) sum += i; } return (sum == n); } /** find perfect number < limit */ unsigned perfect_numbers(PerfectNumbers * pn, unsigned limit) { da_init(*pn); unsigned i; #pragma omp parallel for private(i), schedule(dynamic) for (i = 1; i < limit; i++) { if (is_perfect(i)) { da_push(*pn, i); } } return da_count(*pn); } /** pretty-print perfect numbers */ void print_perfect_numbers(PerfectNumbers const *const pn) { unsigned len = da_count(*pn); printf("{"); if (len > 0) printf(" %d", pn->p[0]); for (unsigned i = 1; i < len; i++) { printf(", %d", pn->p[i]); } printf(" }"); }
top_k_v2_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* The reason why we need the topk v2 is because the compatibility. We redefine the NaN is maximum value in the process of comparing. If do not add the topk v2, will affect the inference result of model that traing by the older version paddlepaddle. */ #pragma once #include <algorithm> #include <iostream> #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/operators/transpose_op.h" namespace paddle { namespace operators { inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n, int* post) { *pre = 1; *post = 1; *n = dim[axis]; for (int i = 0; i < axis; ++i) { (*pre) *= dim[i]; } for (int i = axis + 1; i < dim.size(); ++i) { (*post) *= dim[i]; } } template <typename T, typename Type> static void FullTopK(Type input_height, Type input_width, int input_dim, const framework::Tensor* input, T* t_out, Type* t_indices, const int& k, const bool& largest, const bool& sorted) { // when the k is small, will the partial sort bool partial_sort_flag = (k * 64) < input_width; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif // Eigen::DSizes<int, 2> flat2dims(input_height, input_width); for (Type i = 0; i < input_height; ++i) { std::vector<std::pair<T, Type>> col_vec; col_vec.reserve(input_width); if (input_dim == 1) { auto e_input = 
framework::EigenVector<T>::Flatten(*input); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(j), j)); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j)); } } if (partial_sort_flag) { std::partial_sort( col_vec.begin(), col_vec.begin() + k, col_vec.end(), [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { if (largest) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); } else { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); } }); } else { // use the nth-element to get the K-larger or K-small element if (largest) { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort(col_vec.begin(), col_vec.begin() + k - 1, [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); } } else { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort( col_vec.begin(), col_vec.begin() + k - 1, [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && 
std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); } } } for (Type j = 0; j < k; ++j) { t_out[i * k + j] = col_vec[j].first; t_indices[i * k + j] = col_vec[j].second; } } } template <typename T, typename Type> static void FullTopKAssign(const Type& input_height, const Type& input_width, const int& input_dim, const framework::Tensor* input, const framework::Tensor* indices, T* output_data, const int& k) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (Type i = 0; i < input_height; ++i) { if (input_dim == 1) { auto e_input = framework::EigenVector<T>::Flatten(*input); auto e_indices = framework::EigenVector<Type>::Flatten(*indices); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(j)] = e_input(j); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); auto e_indices = framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(i, j)] = e_input(i, j); } } } } template <typename DeviceContext, typename T> class TopkV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // Get the top k elements of each row of input tensor auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto* indices = context.Output<Tensor>("Indices"); const auto& in_dims = input->dims(); int k = static_cast<int>(context.Attr<int>("k")); const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted")); const auto& largest = static_cast<bool>(context.Attr<bool>("largest")); // axis < 0, cacluate the real axis int axis = static_cast<int>(context.Attr<int>("axis")); if (axis < 0) axis += in_dims.size(); // if K tensor is not null, will the use K tesnor as k auto* k_t = context.Input<Tensor>("K"); if (k_t) { k = k_t->data<int>()[0]; framework::DDim output_dims = output->dims(); // accroding to axis to set K value in the dim 
output_dims[axis] = k; output->Resize(output_dims); indices->Resize(output_dims); } T* output_data = output->mutable_data<T>(context.GetPlace()); int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace()); const auto& out_dims = output->dims(); if (axis + 1 == in_dims.size()) { const int64_t& input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input, output_data, indices_data, k, largest, sorted); } else { // if the topk dims is not last dim, will tranpose and do topk std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.push_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); // get the trans input_dims, out_dims framework::DDim trans_dims(in_dims); framework::DDim trans_out_dims(output->dims()); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; } for (size_t i = 0; i < trans.size(); i++) { trans_out_dims[i] = out_dims[trans[i]]; } Tensor trans_inp; trans_inp.mutable_data<T>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // transpose the input value TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input, &trans_inp, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; // Allocate the temp tensor to the save the topk indices, values Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace()); Tensor tmp_indices; auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace()); // get the TopK value FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), 
&trans_inp, t_out, t_ind, k, largest, sorted); // transpose back TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, tmp_indices, indices, trans); TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, output, trans); } } }; template <typename DeviceContext, typename T> class TopkV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); int axis = static_cast<int>(context.Attr<int>("axis")); const auto& in_dims = x->dims(); const auto& out_dims = indices->dims(); // axis < 0, get the real axis axis = (axis < 0) ? (in_dims.size() + axis) : axis; const size_t& k = out_dims[axis]; T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); if (axis + 1 == in_dims.size()) { // allocate the memory for the input_grad // assign the out_grad to input_grad directly const int64_t input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t input_width = in_dims[in_dims.size() - 1]; // init the output grad with 0, because some input elements has no grad memset(x_grad_data, 0, x_grad->numel() * sizeof(T)); // Assign the output_grad to input_grad FullTopKAssign(input_height, input_width, in_dims.size(), out_grad, indices, x_grad_data, k); } else { // can not assign grad to input_grad, must do the transpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(out_dims.size() - 1); for (int i = axis + 1; i < out_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); framework::DDim trans_dims(out_dims); framework::DDim trans_in_dims(in_dims); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = out_dims[trans[i]]; trans_in_dims[i] = 
in_dims[trans[i]]; } // transpose the out_grad, indices Tensor trans_dO; trans_dO.mutable_data<T>(trans_dims, context.GetPlace()); Tensor trans_ind; trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // Do transpose TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *out_grad, &trans_dO, trans); TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, *indices, &trans_ind, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1)); const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1]; // Assign the out_grad to tranpose input_grad Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace()); memset(t_out, 0, x_grad->numel() * sizeof(T)); FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(), &trans_dO, &trans_ind, t_out, k); // Transpose back TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, x_grad, trans); } } }; } // namespace operators } // namespace paddle
/* The reason why we need the topk v2 is because the compatibility. We redefine the NaN is maximum value in the process of comparing. If do not add the topk v2, will affect the inference result of model that traing by the older version paddlepaddle. */ #pragma once #include <algorithm> #include <iostream> #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/operators/transpose_op.h" namespace paddle { namespace operators { inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n, int* post) { *pre = 1; *post = 1; *n = dim[axis]; for (int i = 0; i < axis; ++i) { (*pre) *= dim[i]; } for (int i = axis + 1; i < dim.size(); ++i) { (*post) *= dim[i]; } } template <typename T, typename Type> static void FullTopK(Type input_height, Type input_width, int input_dim, const framework::Tensor* input, T* t_out, Type* t_indices, const int& k, const bool& largest, const bool& sorted) { // when the k is small, will the partial sort bool partial_sort_flag = (k * 64) < input_width; #ifdef PADDLE_WITH_MKLML #endif // Eigen::DSizes<int, 2> flat2dims(input_height, input_width); for (Type i = 0; i < input_height; ++i) { std::vector<std::pair<T, Type>> col_vec; col_vec.reserve(input_width); if (input_dim == 1) { auto e_input = framework::EigenVector<T>::Flatten(*input); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(j), j)); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j)); } } if (partial_sort_flag) { std::partial_sort( col_vec.begin(), col_vec.begin() + k, col_vec.end(), [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { if (largest) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || 
(l.first > r.first); } else { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); } }); } else { // use the nth-element to get the K-larger or K-small element if (largest) { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort(col_vec.begin(), col_vec.begin() + k - 1, [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); } } else { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort( col_vec.begin(), col_vec.begin() + k - 1, [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); } } } for (Type j = 0; j < k; ++j) { t_out[i * k + j] = col_vec[j].first; t_indices[i * k + j] = col_vec[j].second; } } } template <typename T, typename Type> static void FullTopKAssign(const Type& input_height, const Type& input_width, const int& input_dim, const framework::Tensor* input, const framework::Tensor* indices, T* output_data, const int& k) { #ifdef PADDLE_WITH_MKLML #endif for (Type i = 0; i < input_height; ++i) { if (input_dim == 1) { auto e_input = framework::EigenVector<T>::Flatten(*input); auto e_indices = framework::EigenVector<Type>::Flatten(*indices); 
for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(j)] = e_input(j); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); auto e_indices = framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(i, j)] = e_input(i, j); } } } } template <typename DeviceContext, typename T> class TopkV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // Get the top k elements of each row of input tensor auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto* indices = context.Output<Tensor>("Indices"); const auto& in_dims = input->dims(); int k = static_cast<int>(context.Attr<int>("k")); const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted")); const auto& largest = static_cast<bool>(context.Attr<bool>("largest")); // axis < 0, cacluate the real axis int axis = static_cast<int>(context.Attr<int>("axis")); if (axis < 0) axis += in_dims.size(); // if K tensor is not null, will the use K tesnor as k auto* k_t = context.Input<Tensor>("K"); if (k_t) { k = k_t->data<int>()[0]; framework::DDim output_dims = output->dims(); // accroding to axis to set K value in the dim output_dims[axis] = k; output->Resize(output_dims); indices->Resize(output_dims); } T* output_data = output->mutable_data<T>(context.GetPlace()); int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace()); const auto& out_dims = output->dims(); if (axis + 1 == in_dims.size()) { const int64_t& input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input, output_data, indices_data, k, largest, sorted); } else { // if the topk dims is not last dim, will tranpose and do topk std::vector<int> trans; for (int i 
= 0; i < axis; i++) { trans.emplace_back(i); } trans.push_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); // get the trans input_dims, out_dims framework::DDim trans_dims(in_dims); framework::DDim trans_out_dims(output->dims()); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; } for (size_t i = 0; i < trans.size(); i++) { trans_out_dims[i] = out_dims[trans[i]]; } Tensor trans_inp; trans_inp.mutable_data<T>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // transpose the input value TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input, &trans_inp, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; // Allocate the temp tensor to the save the topk indices, values Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace()); Tensor tmp_indices; auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace()); // get the TopK value FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), &trans_inp, t_out, t_ind, k, largest, sorted); // transpose back TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, tmp_indices, indices, trans); TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, output, trans); } } }; template <typename DeviceContext, typename T> class TopkV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); int axis = 
static_cast<int>(context.Attr<int>("axis")); const auto& in_dims = x->dims(); const auto& out_dims = indices->dims(); // axis < 0, get the real axis axis = (axis < 0) ? (in_dims.size() + axis) : axis; const size_t& k = out_dims[axis]; T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); if (axis + 1 == in_dims.size()) { // allocate the memory for the input_grad // assign the out_grad to input_grad directly const int64_t input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t input_width = in_dims[in_dims.size() - 1]; // init the output grad with 0, because some input elements has no grad memset(x_grad_data, 0, x_grad->numel() * sizeof(T)); // Assign the output_grad to input_grad FullTopKAssign(input_height, input_width, in_dims.size(), out_grad, indices, x_grad_data, k); } else { // can not assign grad to input_grad, must do the transpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(out_dims.size() - 1); for (int i = axis + 1; i < out_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); framework::DDim trans_dims(out_dims); framework::DDim trans_in_dims(in_dims); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = out_dims[trans[i]]; trans_in_dims[i] = in_dims[trans[i]]; } // transpose the out_grad, indices Tensor trans_dO; trans_dO.mutable_data<T>(trans_dims, context.GetPlace()); Tensor trans_ind; trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // Do transpose TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *out_grad, &trans_dO, trans); TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, *indices, &trans_ind, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1)); const int64_t input_width = 
trans_in_dims[trans_in_dims.size() - 1]; // Assign the out_grad to tranpose input_grad Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace()); memset(t_out, 0, x_grad->numel() * sizeof(T)); FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(), &trans_dO, &trans_ind, t_out, k); // Transpose back TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, x_grad, trans); } } }; } // namespace operators } // namespace paddle
/* The reason why we need the topk v2 is because the compatibility. We redefine the NaN is maximum value in the process of comparing. If do not add the topk v2, will affect the inference result of model that traing by the older version paddlepaddle. */ #pragma once #include <algorithm> #include <iostream> #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/operators/transpose_op.h" namespace paddle { namespace operators { inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n, int* post) { *pre = 1; *post = 1; *n = dim[axis]; for (int i = 0; i < axis; ++i) { (*pre) *= dim[i]; } for (int i = axis + 1; i < dim.size(); ++i) { (*post) *= dim[i]; } } template <typename T, typename Type> static void FullTopK(Type input_height, Type input_width, int input_dim, const framework::Tensor* input, T* t_out, Type* t_indices, const int& k, const bool& largest, const bool& sorted) { // when the k is small, will the partial sort bool partial_sort_flag = (k * 64) < input_width; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif // Eigen::DSizes<int, 2> flat2dims(input_height, input_width); for (Type i = 0; i < input_height; ++i) { std::vector<std::pair<T, Type>> col_vec; col_vec.reserve(input_width); if (input_dim == 1) { auto e_input = framework::EigenVector<T>::Flatten(*input); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(j), j)); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j)); } } if (partial_sort_flag) { std::partial_sort( col_vec.begin(), col_vec.begin() + k, col_vec.end(), [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { if (largest) { return (std::isnan(static_cast<double>(l.first)) && 
!std::isnan(static_cast<double>(r.first))) || (l.first > r.first); } else { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); } }); } else { // use the nth-element to get the K-larger or K-small element if (largest) { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort(col_vec.begin(), col_vec.begin() + k - 1, [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); } } else { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort( col_vec.begin(), col_vec.begin() + k - 1, [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); } } } for (Type j = 0; j < k; ++j) { t_out[i * k + j] = col_vec[j].first; t_indices[i * k + j] = col_vec[j].second; } } } template <typename T, typename Type> static void FullTopKAssign(const Type& input_height, const Type& input_width, const int& input_dim, const framework::Tensor* input, const framework::Tensor* indices, T* output_data, const int& k) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (Type i = 0; i < input_height; ++i) { if (input_dim == 1) { auto e_input = 
framework::EigenVector<T>::Flatten(*input); auto e_indices = framework::EigenVector<Type>::Flatten(*indices); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(j)] = e_input(j); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); auto e_indices = framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(i, j)] = e_input(i, j); } } } } template <typename DeviceContext, typename T> class TopkV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // Get the top k elements of each row of input tensor auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto* indices = context.Output<Tensor>("Indices"); const auto& in_dims = input->dims(); int k = static_cast<int>(context.Attr<int>("k")); const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted")); const auto& largest = static_cast<bool>(context.Attr<bool>("largest")); // axis < 0, cacluate the real axis int axis = static_cast<int>(context.Attr<int>("axis")); if (axis < 0) axis += in_dims.size(); // if K tensor is not null, will the use K tesnor as k auto* k_t = context.Input<Tensor>("K"); if (k_t) { k = k_t->data<int>()[0]; framework::DDim output_dims = output->dims(); // accroding to axis to set K value in the dim output_dims[axis] = k; output->Resize(output_dims); indices->Resize(output_dims); } T* output_data = output->mutable_data<T>(context.GetPlace()); int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace()); const auto& out_dims = output->dims(); if (axis + 1 == in_dims.size()) { const int64_t& input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input, output_data, indices_data, k, largest, 
sorted); } else { // if the topk dims is not last dim, will tranpose and do topk std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.push_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); // get the trans input_dims, out_dims framework::DDim trans_dims(in_dims); framework::DDim trans_out_dims(output->dims()); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; } for (size_t i = 0; i < trans.size(); i++) { trans_out_dims[i] = out_dims[trans[i]]; } Tensor trans_inp; trans_inp.mutable_data<T>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // transpose the input value TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input, &trans_inp, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; // Allocate the temp tensor to the save the topk indices, values Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace()); Tensor tmp_indices; auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace()); // get the TopK value FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), &trans_inp, t_out, t_ind, k, largest, sorted); // transpose back TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, tmp_indices, indices, trans); TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, output, trans); } } }; template <typename DeviceContext, typename T> class TopkV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = 
context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); int axis = static_cast<int>(context.Attr<int>("axis")); const auto& in_dims = x->dims(); const auto& out_dims = indices->dims(); // axis < 0, get the real axis axis = (axis < 0) ? (in_dims.size() + axis) : axis; const size_t& k = out_dims[axis]; T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); if (axis + 1 == in_dims.size()) { // allocate the memory for the input_grad // assign the out_grad to input_grad directly const int64_t input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t input_width = in_dims[in_dims.size() - 1]; // init the output grad with 0, because some input elements has no grad memset(x_grad_data, 0, x_grad->numel() * sizeof(T)); // Assign the output_grad to input_grad FullTopKAssign(input_height, input_width, in_dims.size(), out_grad, indices, x_grad_data, k); } else { // can not assign grad to input_grad, must do the transpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(out_dims.size() - 1); for (int i = axis + 1; i < out_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); framework::DDim trans_dims(out_dims); framework::DDim trans_in_dims(in_dims); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = out_dims[trans[i]]; trans_in_dims[i] = in_dims[trans[i]]; } // transpose the out_grad, indices Tensor trans_dO; trans_dO.mutable_data<T>(trans_dims, context.GetPlace()); Tensor trans_ind; trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // Do transpose TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *out_grad, &trans_dO, trans); TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, *indices, &trans_ind, trans); const int64_t input_height = 
framework::product( framework::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1)); const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1]; // Assign the out_grad to tranpose input_grad Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace()); memset(t_out, 0, x_grad->numel() * sizeof(T)); FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(), &trans_dO, &trans_ind, t_out, k); // Transpose back TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, x_grad, trans); } } }; } // namespace operators } // namespace paddle
GB_binop__isgt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_uint8 // A.*B function (eWiseMult): GB_AemultB__isgt_uint8 // A*D function (colscale): GB_AxD__isgt_uint8 // D*A function (rowscale): GB_DxB__isgt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__isgt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__isgt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint8 // C=scalar+B GB_bind1st__isgt_uint8 // C=scalar+B' GB_bind1st_tran__isgt_uint8 // C=A+scalar GB_bind2nd__isgt_uint8 // C=A'+scalar GB_bind2nd_tran__isgt_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; 
\ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_uint8 // A.*B function (eWiseMult): GB_AemultB__isgt_uint8 // A*D function (colscale): GB_AxD__isgt_uint8 // D*A function (rowscale): GB_DxB__isgt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__isgt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__isgt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint8 // C=scalar+B GB_bind1st__isgt_uint8 // C=scalar+B' GB_bind1st_tran__isgt_uint8 // C=A+scalar GB_bind2nd__isgt_uint8 // C=A'+scalar GB_bind2nd_tran__isgt_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_uint8 // A.*B function (eWiseMult): GB_AemultB__isgt_uint8 // A*D function (colscale): GB_AxD__isgt_uint8 // D*A function (rowscale): GB_DxB__isgt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__isgt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__isgt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint8 // C=scalar+B GB_bind1st__isgt_uint8 // C=scalar+B' GB_bind1st_tran__isgt_uint8 // C=A+scalar GB_bind2nd__isgt_uint8 // C=A'+scalar GB_bind2nd_tran__isgt_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; 
\ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
CSRMatrix.h
/* * CSRMatrix.h * * Created on: May 6, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef CSRMATRIX_H_ #define CSRMATRIX_H_ #include <vector> #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include "../graph/Graph.h" #include "../algebraic/SparseAccumulator.h" #include "../auxiliary/Timer.h" namespace NetworKit { /** * @ingroup algebraic * The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row). * If speed is important, use this CSRMatrix instead of the Matrix class. */ class CSRMatrix { private: std::vector<index> rowIdx; std::vector<index> columnIdx; std::vector<double> nonZeros; count nRows; count nCols; bool isSorted; double zero; /** * Quicksort algorithm on columnIdx between [@a left, @a right]. * @param left * @param right */ void quicksort(index left, index right); /** * Partitions columnIdx between [@a left, @a right] after selecting the pivot in the middle. * @param left * @param right * @return The pivot. */ index partition(index left, index right); /** * Binary search the sorted columnIdx vector between [@a left, @a right] for column @a j. * If @a j is not present, the index that is immediately left of the place where @a j would be * is returned. If * @param left * @param right * @param j * @return The position of column @a j in columnIdx or the element immediately to the left of the place where @a j * would be. */ index binarySearchColumns(index left, index right, index j) const; public: /** Default constructor */ CSRMatrix(); /** * Constructs the CSRMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default = 0.0). */ CSRMatrix(const count dimension, const double zero = 0.0); /** * Constructs the CSRMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default = 0.0). 
*/ CSRMatrix(const count nRows, const count nCols, const double zero = 0.0); /** * Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count dimension, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements stored in @a columnIdx and @a values. @a columnIdx and @a values store the colums and values by row. * @param nRows * @param nCols * @param columnIdx * @param values * @param zero The zero element (default is 0.0). * @param isSorted True if the column indices in @a columnIdx are sorted in every row. */ CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param rowIdx The rowIdx vector of the CSR format. * @param columnIdx The columnIdx vector of the CSR format. 
* @param nonZeros The nonZero vector of the CSR format. Should be as long as the @a columnIdx vector. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<index>& rowIdx, const std::vector<index>& columnIdx, const std::vector<double>& nonZeros, const double zero = 0.0, bool isSorted = false); /** Default copy constructor */ CSRMatrix (const CSRMatrix &other) = default; /** Default move constructor */ CSRMatrix (CSRMatrix &&other) = default; /** Default destructor */ virtual ~CSRMatrix() = default; /** Default move assignment operator */ CSRMatrix& operator=(CSRMatrix &&other) = default; /** Default copy assignment operator */ CSRMatrix& operator=(const CSRMatrix &other) = default; /** * Compares this matrix to @a other and returns true if the shape and zero element are the same as well as * all entries, otherwise returns false. * @param other */ bool operator==(const CSRMatrix& other) const { bool equal = nRows == other.nRows && nCols == other.nCols && zero == other.zero; if (equal) { forNonZeroElementsInRowOrder([&](index i, index j, double value) { if (other(i,j) != value) { equal = false; return; } }); } return equal; } /** * Compares this matrix to @a other and returns false if the shape and zero element are the same as well as * all entries, otherwise returns true. * @param other */ bool operator!=(const CSRMatrix& other) const { return !((*this) == other); } /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. 
*/ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. * @note This operation can be linear in the number of non-zeros due to vector element movements */ void setValue(const index i, const index j, const double value); /** * Sorts the column indices in each row for faster access. */ void sort(); /** * @return True if the matrix is sorted, otherwise false. */ bool sorted() const; /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ CSRMatrix operator+(const CSRMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ CSRMatrix& operator+=(const CSRMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ CSRMatrix operator-(const CSRMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ CSRMatrix& operator-=(const CSRMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ CSRMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ CSRMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. 
* @return The result of multiplying this matrix with @a other. */ CSRMatrix operator*(const CSRMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ CSRMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ CSRMatrix& operator/=(const double &divisor); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A Sorted CSRMatrix. * @param B Sorted CSRMatrix. * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions and must be sorted. */ template<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp); /** * Computes @a A^T * @a B. * @param A * @param B * @return @a A^T * @a B. * @note The number of rows of @a A must be equal to the number of rows of @a B. */ static CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a A * @a B^T. * @param A * @param B * @return @a A * @a B^T. * @note The number of columns of @a A must be equal to the number of columns of @a B. */ static CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a matrix^T * @a vector. * @param matrix * @param vector * @return @a matrix^T * @a vector. * @note The number of rows of @a matrix must be equal to the dimension of @a vector. */ static Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector); /** * Transposes this matrix and returns it. */ CSRMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. 
It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ CSRMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const CSRMatrix& source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. * @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Compute the (weighted) adjacency matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix adjacencyMatrix(const Graph& graph, double zero = 0.0); /** * Creates a diagonal matrix with dimension equal to the dimension of the Vector @a diagonalElements. The values on * the diagonal are the ones stored in @a diagonalElements (i.e. D(i,i) = diagonalElements[i]). * @param diagonalElements */ static CSRMatrix diagonalMatrix(const Vector& diagonalElements, double zero = 0.0); /** * Returns the (weighted) incidence matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix incidenceMatrix(const Graph& graph, double zero = 0.0); /** * Compute the (weighted) Laplacian of the (weighted) Graph @a graph. 
* @param graph */ static CSRMatrix laplacianMatrix(const Graph& graph, double zero = 0.0); /** * Returns the (weighted) normalized Laplacian matrix of the (weighted) Graph @a graph * @param graph */ static CSRMatrix normalizedLaplacianMatrix(const Graph& graph, double zero = 0.0); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all elements in row @a i in the matrix and call handle(index column, double value) */ template<typename L> void forElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); if (A.sorted() && B.sorted()) { std::vector<index> rowIdx(A.nRows+1); std::vector<std::vector<index>> columns(A.nRows); rowIdx[0] = 0; #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; while (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) { if (A.columnIdx[k] < B.columnIdx[l]) { columns[i].push_back(A.columnIdx[k]); ++k; } else if (A.columnIdx[k] > B.columnIdx[l]) { columns[i].push_back(B.columnIdx[l]); ++l; } else { // A.columnIdx[k] == B.columnIdx[l] columns[i].push_back(A.columnIdx[k]); ++k; ++l; } ++rowIdx[i+1]; } while (k < A.rowIdx[i+1]) { columns[i].push_back(A.columnIdx[k]); ++k; ++rowIdx[i+1]; } while (l < B.rowIdx[i+1]) { columns[i].push_back(B.columnIdx[l]); ++l; ++rowIdx[i+1]; } } for (index i = 0; i < A.nRows; ++i) { rowIdx[i+1] += rowIdx[i]; } count nnz = rowIdx[A.nRows]; std::vector<index> columnIdx(nnz); std::vector<double> nonZeros(nnz, A.zero); #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { for (index cIdx = rowIdx[i], j = 0; cIdx < rowIdx[i+1]; ++cIdx, ++j) { columnIdx[cIdx] = columns[i][j]; } columns[i].clear(); columns[i].resize(0); columns[i].shrink_to_fit(); } #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; for (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) { if (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) { nonZeros[cIdx] = A.nonZeros[k]; ++k; } if (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) { nonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]); ++l; } } } return CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, A.zero, true); } 
else { // A or B not sorted std::vector<int64_t> columnPointer(A.nCols, -1); std::vector<double> Arow(A.nCols, A.zero); std::vector<double> Brow(A.nCols, B.zero); std::vector<Triplet> triplets; for (index i = 0; i < A.nRows; ++i) { index listHead = 0; count nnz = 0; // search for nonZeros in our own matrix for (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) { index j = A.columnIdx[k]; Arow[j] = A.nonZeros[k]; columnPointer[j] = listHead; listHead = j; nnz++; } // search for nonZeros in the other matrix for (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) { index j = B.columnIdx[k]; Brow[j] = B.nonZeros[k]; if (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j columnPointer[j] = listHead; listHead = j; nnz++; } } // apply operator on the found nonZeros in A and B for (count k = 0; k < nnz; ++k) { double value = binaryOp(Arow[listHead], Brow[listHead]); if (value != A.zero) { triplets.push_back({i,listHead,value}); } index temp = listHead; listHead = columnPointer[listHead]; // reset for next row columnPointer[temp] = -1; Arow[temp] = A.zero; Brow[temp] = B.zero; } nnz = 0; } return CSRMatrix(A.numberOfRows(), A.numberOfColumns(), triplets); } } template<typename F> void CSRMatrix::apply(const F unaryElementFunction) { #pragma omp parallel for for (omp_index k = 0; k < static_cast<omp_index>(nonZeros.size()); ++k) { nonZeros[k] = unaryElementFunction(nonZeros[k]); } } } /* namespace NetworKit */ template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const { #pragma omp parallel for for (omp_index k = rowIdx[i]; k < static_cast<omp_index>(rowIdx[i+1]); ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::forElementsInRow(index i, L handle) const 
{ Vector rowVector = row(i); index j = 0; rowVector.forElements([&](double val) { handle(j++, val); }); } template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } #endif /* TESTMATRIX_H_ */
/* * CSRMatrix.h * * Created on: May 6, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef CSRMATRIX_H_ #define CSRMATRIX_H_ #include <vector> #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include "../graph/Graph.h" #include "../algebraic/SparseAccumulator.h" #include "../auxiliary/Timer.h" namespace NetworKit { /** * @ingroup algebraic * The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row). * If speed is important, use this CSRMatrix instead of the Matrix class. */ class CSRMatrix { private: std::vector<index> rowIdx; std::vector<index> columnIdx; std::vector<double> nonZeros; count nRows; count nCols; bool isSorted; double zero; /** * Quicksort algorithm on columnIdx between [@a left, @a right]. * @param left * @param right */ void quicksort(index left, index right); /** * Partitions columnIdx between [@a left, @a right] after selecting the pivot in the middle. * @param left * @param right * @return The pivot. */ index partition(index left, index right); /** * Binary search the sorted columnIdx vector between [@a left, @a right] for column @a j. * If @a j is not present, the index that is immediately left of the place where @a j would be * is returned. If * @param left * @param right * @param j * @return The position of column @a j in columnIdx or the element immediately to the left of the place where @a j * would be. */ index binarySearchColumns(index left, index right, index j) const; public: /** Default constructor */ CSRMatrix(); /** * Constructs the CSRMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default = 0.0). */ CSRMatrix(const count dimension, const double zero = 0.0); /** * Constructs the CSRMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default = 0.0). 
*/ CSRMatrix(const count nRows, const count nCols, const double zero = 0.0); /** * Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count dimension, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements stored in @a columnIdx and @a values. @a columnIdx and @a values store the colums and values by row. * @param nRows * @param nCols * @param columnIdx * @param values * @param zero The zero element (default is 0.0). * @param isSorted True if the column indices in @a columnIdx are sorted in every row. */ CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param rowIdx The rowIdx vector of the CSR format. * @param columnIdx The columnIdx vector of the CSR format. 
* @param nonZeros The nonZero vector of the CSR format. Should be as long as the @a columnIdx vector. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<index>& rowIdx, const std::vector<index>& columnIdx, const std::vector<double>& nonZeros, const double zero = 0.0, bool isSorted = false); /** Default copy constructor */ CSRMatrix (const CSRMatrix &other) = default; /** Default move constructor */ CSRMatrix (CSRMatrix &&other) = default; /** Default destructor */ virtual ~CSRMatrix() = default; /** Default move assignment operator */ CSRMatrix& operator=(CSRMatrix &&other) = default; /** Default copy assignment operator */ CSRMatrix& operator=(const CSRMatrix &other) = default; /** * Compares this matrix to @a other and returns true if the shape and zero element are the same as well as * all entries, otherwise returns false. * @param other */ bool operator==(const CSRMatrix& other) const { bool equal = nRows == other.nRows && nCols == other.nCols && zero == other.zero; if (equal) { forNonZeroElementsInRowOrder([&](index i, index j, double value) { if (other(i,j) != value) { equal = false; return; } }); } return equal; } /** * Compares this matrix to @a other and returns false if the shape and zero element are the same as well as * all entries, otherwise returns true. * @param other */ bool operator!=(const CSRMatrix& other) const { return !((*this) == other); } /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. 
*/ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. * @note This operation can be linear in the number of non-zeros due to vector element movements */ void setValue(const index i, const index j, const double value); /** * Sorts the column indices in each row for faster access. */ void sort(); /** * @return True if the matrix is sorted, otherwise false. */ bool sorted() const; /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ CSRMatrix operator+(const CSRMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ CSRMatrix& operator+=(const CSRMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ CSRMatrix operator-(const CSRMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ CSRMatrix& operator-=(const CSRMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ CSRMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ CSRMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. 
* @return The result of multiplying this matrix with @a other. */ CSRMatrix operator*(const CSRMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ CSRMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ CSRMatrix& operator/=(const double &divisor); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A Sorted CSRMatrix. * @param B Sorted CSRMatrix. * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions and must be sorted. */ template<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp); /** * Computes @a A^T * @a B. * @param A * @param B * @return @a A^T * @a B. * @note The number of rows of @a A must be equal to the number of rows of @a B. */ static CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a A * @a B^T. * @param A * @param B * @return @a A * @a B^T. * @note The number of columns of @a A must be equal to the number of columns of @a B. */ static CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a matrix^T * @a vector. * @param matrix * @param vector * @return @a matrix^T * @a vector. * @note The number of rows of @a matrix must be equal to the dimension of @a vector. */ static Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector); /** * Transposes this matrix and returns it. */ CSRMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. 
It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ CSRMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const CSRMatrix& source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. * @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Compute the (weighted) adjacency matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix adjacencyMatrix(const Graph& graph, double zero = 0.0); /** * Creates a diagonal matrix with dimension equal to the dimension of the Vector @a diagonalElements. The values on * the diagonal are the ones stored in @a diagonalElements (i.e. D(i,i) = diagonalElements[i]). * @param diagonalElements */ static CSRMatrix diagonalMatrix(const Vector& diagonalElements, double zero = 0.0); /** * Returns the (weighted) incidence matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix incidenceMatrix(const Graph& graph, double zero = 0.0); /** * Compute the (weighted) Laplacian of the (weighted) Graph @a graph. 
* @param graph */ static CSRMatrix laplacianMatrix(const Graph& graph, double zero = 0.0); /** * Returns the (weighted) normalized Laplacian matrix of the (weighted) Graph @a graph * @param graph */ static CSRMatrix normalizedLaplacianMatrix(const Graph& graph, double zero = 0.0); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all elements in row @a i in the matrix and call handle(index column, double value) */ template<typename L> void forElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); if (A.sorted() && B.sorted()) { std::vector<index> rowIdx(A.nRows+1); std::vector<std::vector<index>> columns(A.nRows); rowIdx[0] = 0; for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; while (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) { if (A.columnIdx[k] < B.columnIdx[l]) { columns[i].push_back(A.columnIdx[k]); ++k; } else if (A.columnIdx[k] > B.columnIdx[l]) { columns[i].push_back(B.columnIdx[l]); ++l; } else { // A.columnIdx[k] == B.columnIdx[l] columns[i].push_back(A.columnIdx[k]); ++k; ++l; } ++rowIdx[i+1]; } while (k < A.rowIdx[i+1]) { columns[i].push_back(A.columnIdx[k]); ++k; ++rowIdx[i+1]; } while (l < B.rowIdx[i+1]) { columns[i].push_back(B.columnIdx[l]); ++l; ++rowIdx[i+1]; } } for (index i = 0; i < A.nRows; ++i) { rowIdx[i+1] += rowIdx[i]; } count nnz = rowIdx[A.nRows]; std::vector<index> columnIdx(nnz); std::vector<double> nonZeros(nnz, A.zero); for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { for (index cIdx = rowIdx[i], j = 0; cIdx < rowIdx[i+1]; ++cIdx, ++j) { columnIdx[cIdx] = columns[i][j]; } columns[i].clear(); columns[i].resize(0); columns[i].shrink_to_fit(); } for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; for (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) { if (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) { nonZeros[cIdx] = A.nonZeros[k]; ++k; } if (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) { nonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]); ++l; } } } return CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, A.zero, true); } else { // A or B not sorted std::vector<int64_t> columnPointer(A.nCols, -1); 
std::vector<double> Arow(A.nCols, A.zero); std::vector<double> Brow(A.nCols, B.zero); std::vector<Triplet> triplets; for (index i = 0; i < A.nRows; ++i) { index listHead = 0; count nnz = 0; // search for nonZeros in our own matrix for (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) { index j = A.columnIdx[k]; Arow[j] = A.nonZeros[k]; columnPointer[j] = listHead; listHead = j; nnz++; } // search for nonZeros in the other matrix for (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) { index j = B.columnIdx[k]; Brow[j] = B.nonZeros[k]; if (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j columnPointer[j] = listHead; listHead = j; nnz++; } } // apply operator on the found nonZeros in A and B for (count k = 0; k < nnz; ++k) { double value = binaryOp(Arow[listHead], Brow[listHead]); if (value != A.zero) { triplets.push_back({i,listHead,value}); } index temp = listHead; listHead = columnPointer[listHead]; // reset for next row columnPointer[temp] = -1; Arow[temp] = A.zero; Brow[temp] = B.zero; } nnz = 0; } return CSRMatrix(A.numberOfRows(), A.numberOfColumns(), triplets); } } template<typename F> void CSRMatrix::apply(const F unaryElementFunction) { for (omp_index k = 0; k < static_cast<omp_index>(nonZeros.size()); ++k) { nonZeros[k] = unaryElementFunction(nonZeros[k]); } } } /* namespace NetworKit */ template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const { for (omp_index k = rowIdx[i]; k < static_cast<omp_index>(rowIdx[i+1]); ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::forElementsInRow(index i, L handle) const { Vector rowVector = row(i); index j = 0; rowVector.forElements([&](double val) { handle(j++, val); }); } template<typename L> 
inline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } #endif /* TESTMATRIX_H_ */
/* * CSRMatrix.h * * Created on: May 6, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef CSRMATRIX_H_ #define CSRMATRIX_H_ #include <vector> #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include "../graph/Graph.h" #include "../algebraic/SparseAccumulator.h" #include "../auxiliary/Timer.h" namespace NetworKit { /** * @ingroup algebraic * The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row). * If speed is important, use this CSRMatrix instead of the Matrix class. */ class CSRMatrix { private: std::vector<index> rowIdx; std::vector<index> columnIdx; std::vector<double> nonZeros; count nRows; count nCols; bool isSorted; double zero; /** * Quicksort algorithm on columnIdx between [@a left, @a right]. * @param left * @param right */ void quicksort(index left, index right); /** * Partitions columnIdx between [@a left, @a right] after selecting the pivot in the middle. * @param left * @param right * @return The pivot. */ index partition(index left, index right); /** * Binary search the sorted columnIdx vector between [@a left, @a right] for column @a j. * If @a j is not present, the index that is immediately left of the place where @a j would be * is returned. If * @param left * @param right * @param j * @return The position of column @a j in columnIdx or the element immediately to the left of the place where @a j * would be. */ index binarySearchColumns(index left, index right, index j) const; public: /** Default constructor */ CSRMatrix(); /** * Constructs the CSRMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default = 0.0). */ CSRMatrix(const count dimension, const double zero = 0.0); /** * Constructs the CSRMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default = 0.0). 
*/ CSRMatrix(const count nRows, const count nCols, const double zero = 0.0); /** * Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count dimension, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements stored in @a columnIdx and @a values. @a columnIdx and @a values store the colums and values by row. * @param nRows * @param nCols * @param columnIdx * @param values * @param zero The zero element (default is 0.0). * @param isSorted True if the column indices in @a columnIdx are sorted in every row. */ CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, const double zero = 0.0, bool isSorted = false); /** * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param rowIdx The rowIdx vector of the CSR format. * @param columnIdx The columnIdx vector of the CSR format. 
* @param nonZeros The nonZero vector of the CSR format. Should be as long as the @a columnIdx vector. * @param zero The zero element (default is 0.0). * @param isSorted True, if the triplets are sorted per row. Default is false. */ CSRMatrix(const count nRows, const count nCols, const std::vector<index>& rowIdx, const std::vector<index>& columnIdx, const std::vector<double>& nonZeros, const double zero = 0.0, bool isSorted = false); /** Default copy constructor */ CSRMatrix (const CSRMatrix &other) = default; /** Default move constructor */ CSRMatrix (CSRMatrix &&other) = default; /** Default destructor */ virtual ~CSRMatrix() = default; /** Default move assignment operator */ CSRMatrix& operator=(CSRMatrix &&other) = default; /** Default copy assignment operator */ CSRMatrix& operator=(const CSRMatrix &other) = default; /** * Compares this matrix to @a other and returns true if the shape and zero element are the same as well as * all entries, otherwise returns false. * @param other */ bool operator==(const CSRMatrix& other) const { bool equal = nRows == other.nRows && nCols == other.nCols && zero == other.zero; if (equal) { forNonZeroElementsInRowOrder([&](index i, index j, double value) { if (other(i,j) != value) { equal = false; return; } }); } return equal; } /** * Compares this matrix to @a other and returns false if the shape and zero element are the same as well as * all entries, otherwise returns true. * @param other */ bool operator!=(const CSRMatrix& other) const { return !((*this) == other); } /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. 
*/ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. * @note This operation can be linear in the number of non-zeros due to vector element movements */ void setValue(const index i, const index j, const double value); /** * Sorts the column indices in each row for faster access. */ void sort(); /** * @return True if the matrix is sorted, otherwise false. */ bool sorted() const; /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ CSRMatrix operator+(const CSRMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ CSRMatrix& operator+=(const CSRMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ CSRMatrix operator-(const CSRMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ CSRMatrix& operator-=(const CSRMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ CSRMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ CSRMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. 
* @return The result of multiplying this matrix with @a other. */ CSRMatrix operator*(const CSRMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ CSRMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ CSRMatrix& operator/=(const double &divisor); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A Sorted CSRMatrix. * @param B Sorted CSRMatrix. * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions and must be sorted. */ template<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp); /** * Computes @a A^T * @a B. * @param A * @param B * @return @a A^T * @a B. * @note The number of rows of @a A must be equal to the number of rows of @a B. */ static CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a A * @a B^T. * @param A * @param B * @return @a A * @a B^T. * @note The number of columns of @a A must be equal to the number of columns of @a B. */ static CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a matrix^T * @a vector. * @param matrix * @param vector * @return @a matrix^T * @a vector. * @note The number of rows of @a matrix must be equal to the dimension of @a vector. */ static Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector); /** * Transposes this matrix and returns it. */ CSRMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. 
It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ CSRMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const CSRMatrix& source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. * @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Compute the (weighted) adjacency matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix adjacencyMatrix(const Graph& graph, double zero = 0.0); /** * Creates a diagonal matrix with dimension equal to the dimension of the Vector @a diagonalElements. The values on * the diagonal are the ones stored in @a diagonalElements (i.e. D(i,i) = diagonalElements[i]). * @param diagonalElements */ static CSRMatrix diagonalMatrix(const Vector& diagonalElements, double zero = 0.0); /** * Returns the (weighted) incidence matrix of the (weighted) Graph @a graph. * @param graph */ static CSRMatrix incidenceMatrix(const Graph& graph, double zero = 0.0); /** * Compute the (weighted) Laplacian of the (weighted) Graph @a graph. 
* @param graph */ static CSRMatrix laplacianMatrix(const Graph& graph, double zero = 0.0); /** * Returns the (weighted) normalized Laplacian matrix of the (weighted) Graph @a graph * @param graph */ static CSRMatrix normalizedLaplacianMatrix(const Graph& graph, double zero = 0.0); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all elements in row @a i in the matrix and call handle(index column, double value) */ template<typename L> void forElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. 
 */
	template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const;
};

template<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) {
	assert(A.nRows == B.nRows && A.nCols == B.nCols);

	if (A.sorted() && B.sorted()) {
		// Merge-based path: both operands have sorted column indices per
		// row, so their nonzero patterns can be merged like sorted lists.
		std::vector<index> rowIdx(A.nRows+1);             // result CSR row pointers (built as per-row counts first)
		std::vector<std::vector<index>> columns(A.nRows); // merged column indices, one list per row

		rowIdx[0] = 0;
		// Pass 1 (parallel over rows): merge the column patterns of A and B
		// and accumulate the result nonzero count of row i in rowIdx[i+1].
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			index k = A.rowIdx[i];
			index l = B.rowIdx[i];
			while (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) {
				if (A.columnIdx[k] < B.columnIdx[l]) {
					columns[i].push_back(A.columnIdx[k]);
					++k;
				} else if (A.columnIdx[k] > B.columnIdx[l]) {
					columns[i].push_back(B.columnIdx[l]);
					++l;
				} else { // A.columnIdx[k] == B.columnIdx[l]
					columns[i].push_back(A.columnIdx[k]);
					++k;
					++l;
				}
				++rowIdx[i+1];
			}

			// Drain whichever operand still has entries left in this row.
			while (k < A.rowIdx[i+1]) {
				columns[i].push_back(A.columnIdx[k]);
				++k;
				++rowIdx[i+1];
			}

			while (l < B.rowIdx[i+1]) {
				columns[i].push_back(B.columnIdx[l]);
				++l;
				++rowIdx[i+1];
			}
		}

		// Sequential prefix sum turns the per-row counts into CSR row pointers.
		for (index i = 0; i < A.nRows; ++i) {
			rowIdx[i+1] += rowIdx[i];
		}

		count nnz = rowIdx[A.nRows];
		std::vector<index> columnIdx(nnz);
		std::vector<double> nonZeros(nnz, A.zero);

		// Pass 2 (parallel over rows): flatten the per-row column lists into
		// the final columnIdx array and release the temporary storage early.
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			for (index cIdx = rowIdx[i], j = 0; cIdx < rowIdx[i+1]; ++cIdx, ++j) {
				columnIdx[cIdx] = columns[i][j];
			}
			columns[i].clear();
			columns[i].resize(0);
			columns[i].shrink_to_fit();
		}

		// Pass 3 (parallel over rows): fill in the values by walking A and B
		// alongside the merged pattern.
		// NOTE(review): an entry present only in A keeps A's raw value, i.e.
		// binaryOp(a, B.zero) == a is assumed; an entry present only in B is
		// combined as binaryOp(A.zero, b). This holds for + and - with
		// zero == 0 — confirm before using other operators.
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(A.nRows); ++i) {
			index k = A.rowIdx[i];
			index l = B.rowIdx[i];
			for (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) {
				if (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) {
					nonZeros[cIdx] = A.nonZeros[k];
					++k;
				}

				if (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) {
					nonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]);
					++l;
				}
			}
		}

		return CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, A.zero, true);
	}
else { // A or B not sorted std::vector<int64_t> columnPointer(A.nCols, -1); std::vector<double> Arow(A.nCols, A.zero); std::vector<double> Brow(A.nCols, B.zero); std::vector<Triplet> triplets; for (index i = 0; i < A.nRows; ++i) { index listHead = 0; count nnz = 0; // search for nonZeros in our own matrix for (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) { index j = A.columnIdx[k]; Arow[j] = A.nonZeros[k]; columnPointer[j] = listHead; listHead = j; nnz++; } // search for nonZeros in the other matrix for (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) { index j = B.columnIdx[k]; Brow[j] = B.nonZeros[k]; if (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j columnPointer[j] = listHead; listHead = j; nnz++; } } // apply operator on the found nonZeros in A and B for (count k = 0; k < nnz; ++k) { double value = binaryOp(Arow[listHead], Brow[listHead]); if (value != A.zero) { triplets.push_back({i,listHead,value}); } index temp = listHead; listHead = columnPointer[listHead]; // reset for next row columnPointer[temp] = -1; Arow[temp] = A.zero; Brow[temp] = B.zero; } nnz = 0; } return CSRMatrix(A.numberOfRows(), A.numberOfColumns(), triplets); } } template<typename F> void CSRMatrix::apply(const F unaryElementFunction) { #pragma omp parallel for for (omp_index k = 0; k < static_cast<omp_index>(nonZeros.size()); ++k) { nonZeros[k] = unaryElementFunction(nonZeros[k]); } } } /* namespace NetworKit */ template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const { #pragma omp parallel for for (omp_index k = rowIdx[i]; k < static_cast<omp_index>(rowIdx[i+1]); ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::forElementsInRow(index i, L handle) const 
{
	// Densely enumerates row i: materializes it as a Vector (implicit
	// zeros included) and hands each value to handle(column, value).
	Vector rowVector = row(i);
	index j = 0;
	rowVector.forElements([&](double val) {
		handle(j++, val);
	});
}

// Sequentially visits all stored (nonzero) entries in row-major order
// as handle(row, column, value).
template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const {
	for (index i = 0; i < nRows; ++i) {
		for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {
			handle(i, columnIdx[k], nonZeros[k]);
		}
	}
}

// Visits all stored entries, parallelized over rows; handle is invoked
// concurrently for entries of different rows.
template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) {
		for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {
			handle(i, columnIdx[k], nonZeros[k]);
		}
	}
}

#endif /* TESTMATRIX_H_ */
munit.c
/* Copyright (c) 2013-2017 Evan Nemerson <evan@nemerson.com> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /*** Configuration ***/ /* This is just where the output from the test goes. It's really just * meant to let you choose stdout or stderr, but if anyone really want * to direct it to a file let me know, it would be fairly easy to * support. */ #if !defined(MUNIT_OUTPUT_FILE) # define MUNIT_OUTPUT_FILE stdout #endif /* This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce * it, and if your computer is really fast and your tests are tiny you * can increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) # define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* If you have long test names you might want to consider bumping * this. The result information takes 43 characters. 
*/ #if !defined(MUNIT_TEST_NAME_LEN) # define MUNIT_TEST_NAME_LEN 37 #endif /* If you don't like the timing information, you can disable it by * defining MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) # define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) # undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) # define _POSIX_C_SOURCE 200809L #endif /* Solaris freaks out if you try to use a POSIX or SUS standard without * the "right" C standard. */ #if defined(_XOPEN_SOURCE) # undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) # if __STDC_VERSION__ >= 201112L # define _XOPEN_SOURCE 700 # elif __STDC_VERSION__ >= 199901L # define _XOPEN_SOURCE 600 # endif #endif /* Because, according to Microsoft, POSIX is deprecated. You've got * to appreciate the chutzpah. */ #if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE) # define _CRT_NONSTDC_NO_DEPRECATE #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # include <stdbool.h> #elif defined(_WIN32) /* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */ #endif #include <limits.h> #include <time.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <setjmp.h> #if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32) #define MUNIT_NL_LANGINFO #include <locale.h> #include <langinfo.h> #include <strings.h> #endif #if !defined(_WIN32) # include <unistd.h> # include <sys/types.h> # include <sys/wait.h> #else # include <windows.h> # include <io.h> # include <fcntl.h> # if !defined(STDERR_FILENO) # define STDERR_FILENO _fileno(stderr) # endif #endif #include "munit.h" #define MUNIT_STRINGIFY(x) #x #define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x) #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local) # define MUNIT_THREAD_LOCAL _Thread_local #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || 
defined(__IBMCPP__) # define MUNIT_THREAD_LOCAL __thread #elif defined(_WIN32) # define MUNIT_THREAD_LOCAL __declspec(thread) #endif /* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... } * while (0)', or 'do { ... } while (true)'. I'm pretty sure nobody * at Microsoft compiles with /W4. */ #if defined(_MSC_VER) && (_MSC_VER <= 1800) #pragma warning(disable: 4127) #endif #if defined(_WIN32) || defined(__EMSCRIPTEN__) # define MUNIT_NO_FORK #endif #if defined(__EMSCRIPTEN__) # define MUNIT_NO_BUFFER #endif /*** Logging ***/ static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO; static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR; #if defined(MUNIT_THREAD_LOCAL) static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false; static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf; #endif /* At certain warning levels, mingw will trigger warnings about * suggesting the format attribute, which we've explicity *not* set * because it will then choke on our attempts to use the MS-specific * I64 modifier for size_t (which we have to use since MSVC doesn't * support the C99 z modifier). 
 */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Core log sink: writes a "Level: [file:line:] message" record to fp.
 * Messages below munit_log_level_visible are discarded. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      /* Unknown level: report it as an error (fatal by default, see
       * munit_logf_ex) instead of emitting a bogus record. */
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

/* printf-style convenience wrapper around munit_logf_exv without
 * file/line information. */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

/* Log a plain (non-format) message. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

/* Public logging entry point.  Messages at or above
 * munit_log_level_fatal terminate the test: via longjmp back into the
 * harness when thread-local storage is available, otherwise via
 * abort(). */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

/* Report a test failure at filename:line, then unwind via longjmp (if
 * a recovery point exists) or abort().  Does not return. */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...)
{
  va_list ap;
  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  /* Jump back into the harness if a recovery point was set. */
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif

/* Log msg together with a textual description of the current errno,
 * using whichever strerror variant the platform provides. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/* Allocate size bytes of zero-initialized memory.  Returns NULL when
 * size == 0; on allocation failure logs an error (fatal by default,
 * so the test aborts). */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>.  If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code.
For * details, see the Creative Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) # include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) # if defined(__GNUC__) # define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) # else # define PSNIP_CLOCK__COMPILER_ATTRIBUTES # endif # define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX * time. Keep in mind that this clock doesn't account for leap * seconds, and can go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* The CPU time is a clock which increases only when the current * process is active (i.e., it doesn't increment while blocking on * I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* Monotonic time is always running (unlike CPU time), but it only ever moves forward unless you reboot the system. Things like NTP adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. 
*/ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # 
define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include 
<sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres 
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
*/ PSNIP_CLOCK__FUNCTION int psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) { assert(res != NULL); switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_time (res); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_time (res); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_time (res); } return -1; } #endif /* !defined(PSNIP_CLOCK_H) */ static psnip_uint64_t munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) { psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC; if (end->nanoseconds < start->nanoseconds) { r -= (start->nanoseconds - end->nanoseconds); } else { r += (end->nanoseconds - start->nanoseconds); } return r; } #endif /* defined(MUNIT_ENABLE_TIMING) */ /*** PRNG stuff ***/ /* This is (unless I screwed up, which is entirely possible) the * version of PCG with 32-bit state. It was chosen because it has a * small enough state that we should reliably be able to use CAS * instead of requiring a lock for thread-safety. * * If I did screw up, I probably will not bother changing it unless * there is a significant bias. It's really not important this be * particularly strong, as long as it is fairly random it's much more * important that it be reproducible, so bug reports have a better * chance of being reproducible. 
*/ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) # define HAVE_STDATOMIC #elif defined(__clang__) # if __has_extension(c_atomic) # define HAVE_CLANG_ATOMICS # endif #endif /* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */ #if defined(__clang__) && defined(_WIN32) # undef HAVE_STDATOMIC # if defined(__c2__) # undef HAVE_CLANG_ATOMICS # endif #endif #if defined(_OPENMP) # define ATOMIC_UINT32_T uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(HAVE_STDATOMIC) # include <stdatomic.h> # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x) #elif defined(HAVE_CLANG_ATOMICS) # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(_WIN32) # define ATOMIC_UINT32_T volatile LONG # define ATOMIC_UINT32_INIT(x) (x) #else # define ATOMIC_UINT32_T volatile uint32_t # define ATOMIC_UINT32_INIT(x) (x) #endif static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42); #if defined(_OPENMP) static inline void munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) { #pragma omp critical (munit_atomics) *dest = value; } static inline uint32_t munit_atomic_load(ATOMIC_UINT32_T* src) { int ret; #pragma omp critical (munit_atomics) ret = *src; return ret; } static inline uint32_t munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { bool ret; #pragma omp critical (munit_atomics) { if (*dest == *expected) { *dest = desired; ret = true; } else { ret = false; } } return ret; } #elif defined(HAVE_STDATOMIC) # define munit_atomic_store(dest, value) atomic_store(dest, value) # define munit_atomic_load(src) atomic_load(src) # define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value) #elif defined(HAVE_CLANG_ATOMICS) # define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST) # define 
munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) # define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else # warning No atomic implementation, PRNG will not be thread-safe # define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) static inline bool munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return true; } else { return false; } } #endif #define MUNIT_PRNG_MULTIPLIER (747796405U) #define MUNIT_PRNG_INCREMENT (1729U) static munit_uint32_t munit_rand_next_state(munit_uint32_t state) { return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT; } static munit_uint32_t munit_rand_from_state(munit_uint32_t state) { munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U); res ^= res >> 22; return res; } void munit_rand_seed(munit_uint32_t seed) { munit_uint32_t state = munit_rand_next_state(seed + 
MUNIT_PRNG_INCREMENT); munit_atomic_store(&munit_rand_state, state); } static munit_uint32_t munit_rand_generate_seed(void) { struct PsnipClockTimespec wc = { 0, }; munit_uint32_t seed, state; psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc); seed = (munit_uint32_t) wc.nanoseconds; state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); return munit_rand_from_state(state); } static munit_uint32_t munit_rand_state_uint32(munit_uint32_t* state) { const munit_uint32_t old = *state; *state = munit_rand_next_state(old); return munit_rand_from_state(old); } munit_uint32_t munit_rand_uint32(void) { munit_uint32_t old, state; do { old = munit_atomic_load(&munit_rand_state); state = munit_rand_next_state(old); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return munit_rand_from_state(old); } static void munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { size_t members_remaining = size / sizeof(munit_uint32_t); size_t bytes_remaining = size % sizeof(munit_uint32_t); munit_uint8_t* b = data; munit_uint32_t rv; while (members_remaining-- > 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, sizeof(munit_uint32_t)); b += sizeof(munit_uint32_t); } if (bytes_remaining != 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, bytes_remaining); } } void munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { munit_uint32_t old, state; do { state = old = munit_atomic_load(&munit_rand_state); munit_rand_state_memory(&state, size, data); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); } static munit_uint32_t munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) { /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not * to avoid compiler warnings. 
*/ const munit_uint32_t min = (~max + 1U) % max; munit_uint32_t x; if (max == (~((munit_uint32_t) 0U))) return munit_rand_state_uint32(state) ^ salt; max++; do { x = munit_rand_state_uint32(state) ^ salt; } while (x < min); return x % max; } static munit_uint32_t munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) { munit_uint32_t old, state; munit_uint32_t retval; do { state = old = munit_atomic_load(&munit_rand_state); retval = munit_rand_state_at_most(&state, salt, max); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } int munit_rand_int_range(int min, int max) { munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min; if (min > max) return munit_rand_int_range(max, min); if (range > (~((munit_uint32_t) 0U))) range = (~((munit_uint32_t) 0U)); return min + munit_rand_at_most(0, (munit_uint32_t) range); } double munit_rand_double(void) { munit_uint32_t old, state; double retval = 0.0; do { state = old = munit_atomic_load(&munit_rand_state); /* See http://mumble.net/~campbell/tmp/random_real.c for how to do * this right. Patches welcome if you feel that this is too * biased. 
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) munit_uint64_t cpu_clock; munit_uint64_t wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; bool single_parameter_mode; void* user_data; MunitReport report; bool colorize; bool fork; bool show_stderr; bool fatal_failures; } MunitTestRunner; const char* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param->value; return NULL; } static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC)); } /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) { *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; (*params)[*params_size].value = value; (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". */ static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) { char* res; size_t res_l; const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0; const size_t suffix_l = suffix != NULL ? 
strlen(suffix) : 0; if (prefix_l == 0 && suffix_l == 0) { res = NULL; res_l = 0; } else if (prefix_l == 0 && suffix_l != 0) { res = suffix; res_l = suffix_l; } else if (prefix_l != 0 && suffix_l == 0) { res = prefix; res_l = prefix_l; } else { res_l = prefix_l + suffix_l; res = malloc(res_l + 1); memcpy(res, prefix, prefix_l); memcpy(res + prefix_l, suffix, suffix_l); res[res_l] = 0; } if (len != NULL) *len = res_l; return res; } /* Possbily free a string returned by munit_maybe_concat. */ static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) { if (prefix != s && suffix != s) free(s); } /* Cheap string hash function, just used to salt the PRNG. */ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (true); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, }; struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, }; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; else if (iterations == 0) iterations = runner->suite->iterations; 
munit_rand_seed(runner->seed); do { void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); 
return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = true; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = false; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = true; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* Run a single test, with every combination of parameters * requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) { char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name); /* The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter* params = NULL; size_t params_l = 0; /* Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the * CLI. That means we want to run the test once for every * possible combination of parameter values or, if --single was * passed to the CLI, a single time with a random set of * parameters. */ MunitParameter* wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum* pe; const MunitParameter* cli_p; bool filled; unsigned int possible; char** vals; size_t first_wild; const MunitParameter* wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = false; for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = true; break; } } if (filled) continue; /* Nothing from CLI, is the enum NULL/empty? We're not a * fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values ; *vals != NULL ; vals++) possible++; /* We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) { for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* Recurse through the suite and run all the tests. If a list of * tests to run was provied on the command line, run only those * tests. */ static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const char** test_name; const MunitSuite* child_suite; /* Run the tests. 
*/ for (test = suite->tests ; test != NULL && test->test != NULL ; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner* runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) { const MunitArgument* arg; (void) argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = true; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = false; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return false; #endif 
} int munit_suite_main_custom(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char* envptr; unsigned long ts; char* endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument* argument; const char** runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = false; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = false; #if !defined(_WIN32) runner.fork = true; #else runner.fork = false; #endif runner.show_stderr = false; runner.fatal_failures = false; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1 ; arg < argc ; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != 
'\0' || iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int) iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char*) argv[arg + 1]; runner.parameters[parameters_size].value = (char*) argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = true; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = false; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = true; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = true; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = false; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = true; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", 
argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, false, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, true, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if (!argument->parse_argument(suite, user_data, &arg, argc, argv)) goto cleanup; } } else { runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2)); if (runner_tests == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.tests = runner_tests; runner.tests[tests_size++] = argv[arg]; runner.tests[tests_size] = NULL; } } fflush(stderr); fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed); munit_test_runner_run(&runner); tests_run = runner.report.successful + runner.report.failed + runner.report.errored; tests_total = tests_run + runner.report.skipped; if (tests_run == 0) { fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped); } else { fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) 
test skipped.\n", runner.report.successful, tests_run, (((double) runner.report.successful) / ((double) tests_run)) * 100.0, runner.report.skipped, (((double) runner.report.skipped) / ((double) tests_total)) * 100.0); } if (runner.report.failed == 0 && runner.report.errored == 0) { result = EXIT_SUCCESS; } cleanup: free(runner.parameters); free((void*) runner.tests); return result; } int munit_suite_main(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) { return munit_suite_main_custom(suite, user_data, argc, argv, NULL); }
/*** Configuration ***/ /* * This is just where the output from the test goes. It's really just meant * to let you choose stdout or stderr, but if anyone really want to direct it * to a file let me know, it would be fairly easy to support. */ #if !defined(MUNIT_OUTPUT_FILE) #define MUNIT_OUTPUT_FILE stdout #endif /* * This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce it, * and if your computer is really fast and your tests are tiny you can * increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) #define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* * If you have long test names you might want to consider bumping this. The * result information takes 43 characters. */ #if !defined(MUNIT_TEST_NAME_LEN) #define MUNIT_TEST_NAME_LEN 37 #endif /* * If you don't like the timing information, you can disable it by defining * MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) #define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) #undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) #define _POSIX_C_SOURCE 200809L #endif /* * Solaris freaks out if you try to use a POSIX or SUS standard without the * "right" C standard. */ #if defined(_XOPEN_SOURCE) #undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) #if __STDC_VERSION__ >= 201112L #define _XOPEN_SOURCE 700 #elif __STDC_VERSION__ >= 199901L #define _XOPEN_SOURCE 600 #endif #endif /* * Because, according to Microsoft, POSIX is deprecated. You've got to * appreciate the chutzpah. 
*/ #if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE) #define _CRT_NONSTDC_NO_DEPRECATE #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) #include <stdbool.h> #elif defined(_WIN32) /* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */ #endif #include <limits.h> #include <time.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <setjmp.h> #if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32) #define MUNIT_NL_LANGINFO #include <locale.h> #include <langinfo.h> #include <strings.h> #endif #if !defined(_WIN32) #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #else #include <windows.h> #include <io.h> #include <fcntl.h> #if !defined(STDERR_FILENO) #define STDERR_FILENO _fileno(stderr) #endif #endif #include "munit.h" #define MUNIT_STRINGIFY(x) #x #define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x) #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local) #define MUNIT_THREAD_LOCAL _Thread_local #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__) #define MUNIT_THREAD_LOCAL __thread #elif defined(_WIN32) #define MUNIT_THREAD_LOCAL __declspec(thread) #endif /* * MSVC 12.0 will emit a warning at /W4 for code like 'do { ... } while (0)', * or 'do { ... } while (true)'. I'm pretty sure nobody at Microsoft * compiles with /W4. 
*/ #if defined(_MSC_VER) && (_MSC_VER <= 1800) #pragma warning(disable: 4127) #endif #if defined(_WIN32) || defined(__EMSCRIPTEN__) #define MUNIT_NO_FORK #endif #if defined(__EMSCRIPTEN__) #define MUNIT_NO_BUFFER #endif /*** Logging ***/ static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO; static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR; #if defined(MUNIT_THREAD_LOCAL) static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false; static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf; #endif /* * At certain warning levels, mingw will trigger warnings about suggesting * the format attribute, which we've explicity *not* set because it will then * choke on our attempts to use the MS-specific I64 modifier for size_t * (which we have to use since MSVC doesn't support the C99 z modifier). */ #if defined(__MINGW32__) || defined(__MINGW64__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsuggest-attribute=format" #endif MUNIT_PRINTF(5, 0) static void munit_logf_exv(MunitLogLevel level, FILE * fp, const char *filename, int line, const char *format, va_list ap) { if (level < munit_log_level_visible) return; switch (level) { case MUNIT_LOG_DEBUG: fputs("Debug", fp); break; case MUNIT_LOG_INFO: fputs("Info", fp); break; case MUNIT_LOG_WARNING: fputs("Warning", fp); break; case MUNIT_LOG_ERROR: fputs("Error", fp); break; default: munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level); return; } fputs(": ", fp); if (filename != NULL) fprintf(fp, "%s:%d: ", filename, line); vfprintf(fp, format, ap); fputc('\n', fp); } MUNIT_PRINTF(3, 4) static void munit_logf_internal(MunitLogLevel level, FILE * fp, const char *format,...) 
{ va_list ap; va_start(ap, format); munit_logf_exv(level, fp, NULL, 0, format, ap); va_end(ap); } static void munit_log_internal(MunitLogLevel level, FILE * fp, const char *message) { munit_logf_internal(level, fp, "%s", message); } void munit_logf_ex(MunitLogLevel level, const char *filename, int line, const char *format,...) { va_list ap; va_start(ap, format); munit_logf_exv(level, stderr, filename, line, format, ap); va_end(ap); if (level >= munit_log_level_fatal) { #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } } void munit_errorf_ex(const char *filename, int line, const char *format,...) { va_list ap; va_start(ap, format); munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap); va_end(ap); #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } #if defined(__MINGW32__) || defined(__MINGW64__) #pragma GCC diagnostic pop #endif #if !defined(MUNIT_STRERROR_LEN) #define MUNIT_STRERROR_LEN 80 #endif static void munit_log_errno(MunitLogLevel level, FILE * fp, const char *msg) { #if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API)) munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno); #else char munit_error_str[MUNIT_STRERROR_LEN]; munit_error_str[0] = '\0'; #if !defined(_WIN32) strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN); #else strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno); #endif munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno); #endif } /*** Memory allocation ***/ void * munit_malloc_ex(const char *filename, int line, size_t size) { void *ptr; if (size == 0) return NULL; ptr = calloc(1, size); if (MUNIT_UNLIKELY(ptr == NULL)) { munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size); } return ptr; } /*** Timer code ***/ #if defined(MUNIT_ENABLE_TIMING) #define 
psnip_uint64_t munit_uint64_t #define psnip_uint32_t munit_uint32_t /* * Code copied from portable-snippets * <https://github.com/nemequ/portable-snippets/>. If you need to change * something, please do it there so we can keep the code in sync. */ /* * Clocks (v1) Portable Snippets - https://gitub.com/nemequ/portable-snippets * Created by Evan Nemerson <evan@nemerson.com> * * To the extent possible under law, the authors have waived all copyright and * related or neighboring rights to this code. For details, see the Creative * Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) #include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) #if defined(__GNUC__) #define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) #else #define PSNIP_CLOCK__COMPILER_ATTRIBUTES #endif #define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* * This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX time. * Keep in mind that this clock doesn't account for leap seconds, and can * go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* * The CPU time is a clock which increases only when the current process * is active (i.e., it doesn't increment while blocking on I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* * Monotonic time is always running (unlike CPU time), but it only ever * moves forward unless you reboot the system. Things like NTP * adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) #define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else #define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* * We want to be able to detect the libc implementation, so we include * <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) #include <limits.h> #include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* * These are known to work without librt. If you know of others please let * us know so we can add them. 
*/ #if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) #define PSNIP_CLOCK_HAVE_CLOCK_GETTIME #elif !defined(PSNIP_CLOCK_NO_LIBRT) #define PSNIP_CLOCK_HAVE_CLOCK_GETTIME #endif #endif #if defined(_WIN32) #if !defined(PSNIP_CLOCK_CPU_METHOD) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES #endif #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER #endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME #endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) #include <time.h> #if !defined(PSNIP_CLOCK_WALL_METHOD) #if defined(CLOCK_REALTIME_PRECISE) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE #elif !defined(__sun) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME #endif #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) #if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID #elif defined(CLOCK_VIRTUAL) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL #endif #endif #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #if defined(CLOCK_MONOTONIC_RAW) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC #elif defined(CLOCK_MONOTONIC_PRECISE) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE #elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) #define PSNIP_CLOCK_MONOTONIC_METHOD 
PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC #endif #endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) #if !defined(PSNIP_CLOCK_WALL_METHOD) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY #endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) #error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) #include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) #include <sys/time.h> #endif #if \ 
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) #include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) #include <sys/time.h> #include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) #include <CoreServices/CoreServices.h> #include <mach/mach.h> #include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres(clockid_t clk_id) { struct timespec 
res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime(clockid_t clk_id, struct PsnipClockTimespec *res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision(void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time(struct PsnipClockTimespec *res) { (void)res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision(void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif 
defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time(struct PsnipClockTimespec *res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void)res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) - 1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void)res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision(void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == 
PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = {0,}; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time(struct PsnipClockTimespec *res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void)res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = {0,}; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; 
else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* * Returns the number of ticks per second for the specified clock. For * example, a clock with millisecond precision would return 1000, and a clock * with 1 second (such as the time() function) would return 1. * * If the requested clock isn't available, it will return 0. Hopefully this will * be rare, but if it happens to you please let us know so we can work on * finding a way to support your system. * * Note that different clocks on the same system often have a different * precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision(enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision(); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision(); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision(); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* * Set the provided timespec to the requested time. Returns 0 on success, or * a negative value on failure. 
*/ PSNIP_CLOCK__FUNCTION int psnip_clock_get_time(enum PsnipClockType clock_type, struct PsnipClockTimespec *res) { assert(res != NULL); switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_time(res); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_time(res); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_time(res); } return -1; } #endif /* !defined(PSNIP_CLOCK_H) */ static psnip_uint64_t munit_clock_get_elapsed(struct PsnipClockTimespec *start, struct PsnipClockTimespec *end) { psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC; if (end->nanoseconds < start->nanoseconds) { r -= (start->nanoseconds - end->nanoseconds); } else { r += (end->nanoseconds - start->nanoseconds); } return r; } #endif /* defined(MUNIT_ENABLE_TIMING) */ /*** PRNG stuff ***/ /* * This is (unless I screwed up, which is entirely possible) the version of * PCG with 32-bit state. It was chosen because it has a small enough state * that we should reliably be able to use CAS instead of requiring a lock for * thread-safety. * * If I did screw up, I probably will not bother changing it unless there is a * significant bias. It's really not important this be particularly strong, * as long as it is fairly random it's much more important that it be * reproducible, so bug reports have a better chance of being reproducible. 
*/

/*** Pseudo-random number generation ***/

/*
 * The global PRNG state is shared between threads, so we need atomics.
 * Prefer C11 <stdatomic.h>; fall back to Clang's c_atomic extension.
 */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__)
#define HAVE_STDATOMIC
#elif defined(__clang__)
#if __has_extension(c_atomic)
#define HAVE_CLANG_ATOMICS
#endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
#undef HAVE_STDATOMIC
#if defined(__c2__)
#undef HAVE_CLANG_ATOMICS
#endif
#endif

/* Shared PRNG state; the initial value is an arbitrary default seed. */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

/* LCG step constants (PCG-style generator: LCG state advance plus a
 * separate output permutation below). */
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* Advance the LCG state by one step. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state)
{
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

/* Output permutation: mix a raw state value into a pseudo-random
 * result (variable xorshift followed by multiply and a final xorshift). */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state)
{
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

/* Re-seed the shared PRNG state (atomic store, safe from any thread). */
void
munit_rand_seed(munit_uint32_t seed)
{
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

/* Derive a fresh seed from the wall-clock nanosecond counter. */
static munit_uint32_t
munit_rand_generate_seed(void)
{
  struct PsnipClockTimespec wc = {0,};
  munit_uint32_t seed, state;
  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  seed = (munit_uint32_t) wc.nanoseconds;
  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  return munit_rand_from_state(state);
}

/* Next value from a caller-owned state (no atomics needed). */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t * state)
{
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

/* Next value from the shared state.  The CAS loop ensures each
 * concurrent caller consumes a distinct state step. */
munit_uint32_t
munit_rand_uint32(void)
{
  munit_uint32_t old, state;
  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
  return munit_rand_from_state(old);
}

/* Fill `data` with `size` random bytes drawn from *state: whole 32-bit
 * words first, then a final partial word for the remainder. */
static void
munit_rand_state_memory(munit_uint32_t * state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)])
{
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t *b = data;
  munit_uint32_t rv;
  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }
  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

/* Public variant of the above, operating on the shared state.  On CAS
 * failure the buffer is simply refilled with the new state. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)])
{
  munit_uint32_t old, state;
  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

/* Uniform value in [0, max] (inclusive) via rejection sampling; `salt`
 * is XORed in so different call sites diverge from the same seed. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t * state, munit_uint32_t salt, munit_uint32_t max)
{
  /*
   * We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the
   * same as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max
   * using not to avoid compiler warnings.
   */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;
  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;  /* full range: no rejection needed */
  max++;  /* from here on, `max` is the exclusive bound */
  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);  /* reject the biased low region */
  return x % max;
}

/* As munit_rand_state_at_most, but against the shared state (CAS loop). */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max)
{
  munit_uint32_t old, state;
  munit_uint32_t retval;
  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
  return retval;
}

/* Uniform int in [min, max] (inclusive); arguments may be swapped.
 * Ranges wider than UINT32_MAX are clamped. */
int
munit_rand_int_range(int min, int max)
{
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;
  if (min > max)
    return munit_rand_int_range(max, min);
  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));
  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Uniform double in [0, 1). */
double
munit_rand_double(void)
{
  munit_uint32_t old, state;
  double retval = 0.0;
  do {
    state = old = munit_atomic_load(&munit_rand_state);
    /*
     * See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right.  Patches welcome if you feel that this is too biased.
     */
    retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
  return retval;
}

/*** Test suite handling ***/

/* Per-test tallies, accumulated across iterations (and, when forking,
 * shipped from child to parent through a pipe). */
typedef struct {
  unsigned int successful;
  unsigned int skipped;
  unsigned int failed;
  unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
  munit_uint64_t cpu_clock;   /* total CPU time, nanoseconds */
  munit_uint64_t wall_clock;  /* total wall-clock time, nanoseconds */
#endif
} MunitReport;

/* All state for one runner invocation: CLI options plus the running report. */
typedef struct {
  const char *prefix;              /* accumulated suite-name prefix */
  const MunitSuite *suite;
  const char **tests;              /* NULL-terminated list of requested test names, or NULL for all */
  munit_uint32_t seed;
  unsigned int iterations;         /* 0 means use the suite default */
  MunitParameter *parameters;      /* parameters fixed on the CLI */
  bool single_parameter_mode;      /* --single: one random combination per test */
  void *user_data;
  MunitReport report;
  bool colorize;
  bool fork;
  bool show_stderr;
  bool fatal_failures;
} MunitTestRunner;

/* Look up a parameter value by name; NULL if not present. */
const char *
munit_parameters_get(const MunitParameter params[], const char *key)
{
  const MunitParameter *param;
  for (param = params; param != NULL && param->name != NULL; param++)
    if (strcmp(param->name, key) == 0)
      return param->value;
  return NULL;
}

/* Print a nanosecond count as seconds using the configured format. */
static void
munit_print_time(FILE * fp, munit_uint64_t nanoseconds)
{
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT,
          ((double)nanoseconds) / ((double)PSNIP_CLOCK_NSEC_PER_SEC));
}

/* Add a parameter to a NULL-terminated array of parameters, growing it
 * by one slot (plus the terminator).
 * NOTE(review): on realloc failure the previous array pointer is
 * overwritten with NULL and leaks; callers appear to treat MUNIT_ERROR
 * as fatal, but confirm before relying on this. */
static MunitResult
munit_parameters_add(size_t * params_size, MunitParameter * params[MUNIT_ARRAY_PARAM(*params_size)], char *name, char *value)
{
  *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (*params == NULL)
    return MUNIT_ERROR;
  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;
  return MUNIT_OK;
}

/*
 * Concatenate two strings, but just return one of the components unaltered
 * if the other is NULL or "".
 */
static char *
munit_maybe_concat(size_t * len, char *prefix, char *suffix)
{
  char *res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ?
strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    /* Only the suffix is non-empty: return it unaltered (no allocation). */
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    /* Only the prefix is non-empty: return it unaltered (no allocation). */
    res = prefix;
    res_l = prefix_l;
  } else {
    /* Both present: allocate and concatenate. */
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    memcpy(res, prefix, prefix_l);
    memcpy(res + prefix_l, suffix, suffix_l);
    res[res_l] = 0;
  }
  if (len != NULL)
    *len = res_l;
  return res;
}

/* Possibly free a string returned by munit_maybe_concat: free it only
 * if it is an actual concatenation, not one of the original inputs. */
static void
munit_maybe_free_concat(char *s, const char *prefix, const char *suffix)
{
  if (prefix != s && suffix != s)
    free(s);
}

/* Cheap string hash function (djb2), just used to salt the PRNG. */
static munit_uint32_t
munit_str_hash(const char *name)
{
  const char *p;
  munit_uint32_t h = 5381U;
  for (p = name; *p != '\0'; p++)
    h = (h << 5) + h + *p;
  return h;
}

/* Copy everything readable from fd `from` to fd `to` (used to replay a
 * captured stderr buffer).  Short writes are retried; read/copy stops
 * on EOF or error. */
static void
munit_splice(int from, int to)
{
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t bytes_written;
  ssize_t write_res;
#else
  int len;
  int bytes_written;
  int write_res;
#endif
  do {
    len = read(from, buf, sizeof(buf));
    if (len > 0) {
      bytes_written = 0;
      do {
        write_res = write(to, buf + bytes_written, len - bytes_written);
        if (write_res < 0)
          break;
        bytes_written += write_res;
      } while (bytes_written < len);
    } else
      break;
  } while (true);
}

/* This is the part that should be handled in the child process */
static MunitResult
munit_test_runner_exec(MunitTestRunner * runner, const MunitTest * test, const MunitParameter params[], MunitReport * report)
{
  unsigned int iterations = runner->iterations;
  MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wall_clock_begin = {0,}, wall_clock_end = {0,};
  struct PsnipClockTimespec cpu_clock_begin = {0,}, cpu_clock_end = {0,};
#endif
  unsigned int i = 0;
  /* Single-iteration tests always run once; otherwise 0 means "use the
   * suite's default iteration count". */
  if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
    iterations = 1;
  else if (iterations == 0)
    iterations = runner->suite->iterations;
munit_rand_seed(runner->seed); do { void *data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int)result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) #define MUNIT_RESULT_STRING_OK ":)" #define MUNIT_RESULT_STRING_SKIP ":|" #define MUNIT_RESULT_STRING_FAIL ":(" #define MUNIT_RESULT_STRING_ERROR ":o" #define MUNIT_RESULT_STRING_TODO ":/" #else #define MUNIT_RESULT_STRING_OK "OK " #define MUNIT_RESULT_STRING_SKIP "SKIP " #define MUNIT_RESULT_STRING_FAIL "FAIL " #define MUNIT_RESULT_STRING_ERROR "ERROR" #define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner * runner, const char *string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE * stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return 
orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner * runner, const MunitTest * test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; bool first; const MunitParameter *param; FILE *stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = true; for (param = params; param != NULL && param->name != NULL; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = false; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* * Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by asan/tsan/ubsan, * valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t *) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t *) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = true; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* * Here just so that the label is used on Windows and we don't get a * warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner * runner, const MunitTest * test, const char *test_name, MunitParameter * params, MunitParameter * p) { const MunitParameterEnum *pe; char **values; MunitParameter *next; for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values; *values != NULL; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* * Run a single test, with every combination of parameters requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner * runner, const MunitTest * test, const char *prefix) { char *test_name = munit_maybe_concat(NULL, (char *)prefix, (char *)test->name); /* * The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter *params = NULL; size_t params_l = 0; /* * Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the CLI. * That means we want to run the test once for every possible combination * of parameter values or, if --single was passed to the CLI, a single * time with a random set of parameters. */ MunitParameter *wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum *pe; const MunitParameter *cli_p; bool filled; unsigned int possible; char **vals; size_t first_wild; const MunitParameter *wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = false; for (cli_p = runner->parameters; cli_p != NULL && cli_p->name != NULL; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = true; break; } } if (filled) continue; /* * Nothing from CLI, is the enum NULL/empty? We're not a fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* * If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values; *vals != NULL; vals++) possible++; /* * We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* * We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params; wp != NULL && wp->name != NULL; wp++) { for (pe = test->parameters; pe != NULL && pe->name != NULL && pe->values != NULL; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* * Recurse through the suite and run all the tests. If a list of tests to * run was provied on the command line, run only those tests. */ static void munit_test_runner_run_suite(MunitTestRunner * runner, const MunitSuite * suite, const char *prefix) { size_t pre_l; char *pre = munit_maybe_concat(&pre_l, (char *)prefix, (char *)suite->prefix); const MunitTest *test; const char **test_name; const MunitSuite *child_suite; /* Run the tests. 
*/ for (test = suite->tests; test != NULL && test->test != NULL; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests; test_name != NULL && *test_name != NULL; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner * runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)], void *user_data, const MunitArgument arguments[]) { const MunitArgument *arg; (void)argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* * 12345678901234567890123456789012345678901234567890123456789012345678901 * 234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments; arg != NULL && arg->name != NULL; arg++) arg->write_help(arg, user_data); } static const MunitArgument * munit_arguments_find(const MunitArgument arguments[], const char *name) { const MunitArgument *arg; for (arg = arguments; arg != NULL && arg->name != NULL; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite * suite, bool show_params, const char *prefix) { size_t pre_l; char *pre = munit_maybe_concat(&pre_l, (char *)prefix, (char *)suite->prefix); const MunitTest *test; const MunitParameterEnum *params; bool first; char **val; const MunitSuite *child_suite; for (test = suite->tests; test != NULL && test->name != NULL; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters; params != NULL && params->name != NULL; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = true; for (val = params->values; *val != NULL; val++) { if (!first) { fputs(", ", stdout); } else { first = false; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static bool munit_stream_supports_ansi(FILE * stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return false; #endif } int 
munit_suite_main_custom(const MunitSuite * suite, void *user_data, int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char *envptr; unsigned long ts; char *endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument *argument; const char **runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = false; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = false; #if !defined(_WIN32) runner.fork = true; #else runner.fork = false; #endif runner.show_stderr = false; runner.fatal_failures = false; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1; arg < argc; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != '\0' || 
iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int)iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char *)argv[arg + 1]; runner.parameters[parameters_size].value = (char *)argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = true; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = false; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = true; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = true; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = false; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = true; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", argv[arg] + 
2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, false, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, true, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if (!argument->parse_argument(suite, user_data, &arg, argc, argv)) goto cleanup; } } else { runner_tests = realloc((void *)runner.tests, sizeof(char *) * (tests_size + 2)); if (runner_tests == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.tests = runner_tests; runner.tests[tests_size++] = argv[arg]; runner.tests[tests_size] = NULL; } } fflush(stderr); fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed); munit_test_runner_run(&runner); tests_run = runner.report.successful + runner.report.failed + runner.report.errored; tests_total = tests_run + runner.report.skipped; if (tests_run == 0) { fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped); } else { fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test 
skipped.\n", runner.report.successful, tests_run, (((double)runner.report.successful) / ((double)tests_run)) * 100.0, runner.report.skipped, (((double)runner.report.skipped) / ((double)tests_total)) * 100.0); } if (runner.report.failed == 0 && runner.report.errored == 0) { result = EXIT_SUCCESS; } cleanup: free(runner.parameters); free((void *)runner.tests); return result; } int munit_suite_main(const MunitSuite * suite, void *user_data, int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)]) { return munit_suite_main_custom(suite, user_data, argc, argv, NULL); }
/*** Configuration ***/ /* * This is just where the output from the test goes. It's really just meant * to let you choose stdout or stderr, but if anyone really want to direct it * to a file let me know, it would be fairly easy to support. */ #if !defined(MUNIT_OUTPUT_FILE) #define MUNIT_OUTPUT_FILE stdout #endif /* * This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce it, * and if your computer is really fast and your tests are tiny you can * increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) #define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* * If you have long test names you might want to consider bumping this. The * result information takes 43 characters. */ #if !defined(MUNIT_TEST_NAME_LEN) #define MUNIT_TEST_NAME_LEN 37 #endif /* * If you don't like the timing information, you can disable it by defining * MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) #define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) #undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) #define _POSIX_C_SOURCE 200809L #endif /* * Solaris freaks out if you try to use a POSIX or SUS standard without the * "right" C standard. */ #if defined(_XOPEN_SOURCE) #undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) #if __STDC_VERSION__ >= 201112L #define _XOPEN_SOURCE 700 #elif __STDC_VERSION__ >= 199901L #define _XOPEN_SOURCE 600 #endif #endif /* * Because, according to Microsoft, POSIX is deprecated. You've got to * appreciate the chutzpah. 
 */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
#define _CRT_NONSTDC_NO_DEPRECATE
#endif

/* C99 provides <stdbool.h>; on pre-C99 MSVC bool comes from elsewhere. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

/* nl_langinfo() is used on non-Windows platforms unless explicitly disabled. */
#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#else
#include <windows.h>
#include <io.h>
#include <fcntl.h>
#if !defined(STDERR_FILENO)
#define STDERR_FILENO _fileno(stderr)
#endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

/*
 * Select a thread-local storage keyword for the current compiler, if any.
 * NOTE(review): 201102L below looks like a typo for 201112L (C11) — confirm
 * against upstream before changing, since _Thread_local is also probed.
 */
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
#define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
#define MUNIT_THREAD_LOCAL __thread
#elif defined(_WIN32)
#define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/*
 * MSVC 12.0 will emit a warning at /W4 for code like 'do { ... } while (0)',
 * or 'do { ... } while (true)'. I'm pretty sure nobody at Microsoft
 * compiles with /W4.
 */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

/* fork() and stderr buffering are unavailable on these platforms. */
#if defined(_WIN32) || defined(__EMSCRIPTEN__)
#define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
#define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

/* Messages below 'visible' are suppressed; at or above 'fatal' they abort
 * the current test. */
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
/* When armed, fatal log calls longjmp() here instead of calling abort(). */
static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/*
 * At certain warning levels, mingw will trigger warnings about suggesting
 * the format attribute, which we've explicity *not* set because it will then
 * choke on our attempts to use the MS-specific I64 modifier for size_t
 * (which we have to use since MSVC doesn't support the C99 z modifier).
 */
#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/*
 * Core log formatter: writes "<Level>: [<file>:<line>: ]<message>\n" to fp.
 * Returns without output when `level` is below the visibility threshold.
 */
MUNIT_PRINTF(5, 0)
static void
munit_logf_exv(MunitLogLevel level, FILE * fp, const char *filename, int line, const char *format, va_list ap)
{
	if (level < munit_log_level_visible)
		return;

	switch (level) {
	case MUNIT_LOG_DEBUG:
		fputs("Debug", fp);
		break;
	case MUNIT_LOG_INFO:
		fputs("Info", fp);
		break;
	case MUNIT_LOG_WARNING:
		fputs("Warning", fp);
		break;
	case MUNIT_LOG_ERROR:
		fputs("Error", fp);
		break;
	default:
		/* Unknown level: report it as an error through the public API. */
		munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
		return;
	}

	fputs(": ", fp);
	if (filename != NULL)
		fprintf(fp, "%s:%d: ", filename, line);
	vfprintf(fp, format, ap);
	fputc('\n', fp);
}

/* Internal printf-style logger; no fatality handling, caller picks the fp. */
MUNIT_PRINTF(3, 4)
static void
munit_logf_internal(MunitLogLevel level, FILE * fp, const char *format,...)
{
	va_list ap;
	va_start(ap, format);
	munit_logf_exv(level, fp, NULL, 0, format, ap);
	va_end(ap);
}

/* Internal fixed-message logger; forwards to munit_logf_internal(). */
static void
munit_log_internal(MunitLogLevel level, FILE * fp, const char *message)
{
	munit_logf_internal(level, fp, "%s", message);
}

/*
 * Public logger. Levels at or above munit_log_level_fatal terminate the
 * test: via longjmp() when the jump buffer is armed, otherwise abort().
 */
void
munit_logf_ex(MunitLogLevel level, const char *filename, int line, const char *format,...)
{
	va_list ap;
	va_start(ap, format);
	munit_logf_exv(level, stderr, filename, line, format, ap);
	va_end(ap);

	if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
		if (munit_error_jmp_buf_valid)
			longjmp(munit_error_jmp_buf, 1);
#endif
		abort();
	}
}

/* Report a test failure at filename:line and terminate the test. */
void
munit_errorf_ex(const char *filename, int line, const char *format,...)
{
	va_list ap;
	va_start(ap, format);
	munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
	va_end(ap);
#if defined(MUNIT_THREAD_LOCAL)
	if (munit_error_jmp_buf_valid)
		longjmp(munit_error_jmp_buf, 1);
#endif
	abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
#define MUNIT_STRERROR_LEN 80
#endif

/* Log "<msg>: <strerror(errno)> (<errno>)" using whichever strerror variant
 * the platform provides (strerror_r, strerror_s, or plain strerror). */
static void
munit_log_errno(MunitLogLevel level, FILE * fp, const char *msg)
{
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
	munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
	char munit_error_str[MUNIT_STRERROR_LEN];
	munit_error_str[0] = '\0';
#if !defined(_WIN32)
	strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
	strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif
	munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/*
 * Zero-initialized allocation helper behind munit_malloc(); logs an error
 * (fatal under default log settings) on failure. Returns NULL when size is 0.
 */
void *
munit_malloc_ex(const char *filename, int line, size_t size)
{
	void *ptr;

	if (size == 0)
		return NULL;

	ptr = calloc(1, size);
	if (MUNIT_UNLIKELY(ptr == NULL)) {
		munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
	}

	return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define
psnip_uint64_t munit_uint64_t #define psnip_uint32_t munit_uint32_t /* * Code copied from portable-snippets * <https://github.com/nemequ/portable-snippets/>. If you need to change * something, please do it there so we can keep the code in sync. */ /* * Clocks (v1) Portable Snippets - https://gitub.com/nemequ/portable-snippets * Created by Evan Nemerson <evan@nemerson.com> * * To the extent possible under law, the authors have waived all copyright and * related or neighboring rights to this code. For details, see the Creative * Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) #include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) #if defined(__GNUC__) #define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) #else #define PSNIP_CLOCK__COMPILER_ATTRIBUTES #endif #define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* * This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX time. * Keep in mind that this clock doesn't account for leap seconds, and can * go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* * The CPU time is a clock which increases only when the current process * is active (i.e., it doesn't increment while blocking on I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* * Monotonic time is always running (unlike CPU time), but it only ever * moves forward unless you reboot the system. Things like NTP * adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) #define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else #define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* * We want to be able to detect the libc implementation, so we include * <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) #include <limits.h> #include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* * These are known to work without librt. If you know of others please let * us know so we can add them. 
*/ #if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) #define PSNIP_CLOCK_HAVE_CLOCK_GETTIME #elif !defined(PSNIP_CLOCK_NO_LIBRT) #define PSNIP_CLOCK_HAVE_CLOCK_GETTIME #endif #endif #if defined(_WIN32) #if !defined(PSNIP_CLOCK_CPU_METHOD) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES #endif #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER #endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME #endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) #include <time.h> #if !defined(PSNIP_CLOCK_WALL_METHOD) #if defined(CLOCK_REALTIME_PRECISE) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE #elif !defined(__sun) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME #endif #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) #if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID #elif defined(CLOCK_VIRTUAL) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL #endif #endif #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) #if defined(CLOCK_MONOTONIC_RAW) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC #elif defined(CLOCK_MONOTONIC_PRECISE) #define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE #elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) #define PSNIP_CLOCK_MONOTONIC_METHOD 
PSNIP_CLOCK_METHOD_CLOCK_GETTIME #define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC #endif #endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) #if !defined(PSNIP_CLOCK_WALL_METHOD) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY #endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) #define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) #define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) #error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) #include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) #include <sys/time.h> #endif #if \ 
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) #include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) #include <sys/time.h> #include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) #include <CoreServices/CoreServices.h> #include <mach/mach.h> #include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres(clockid_t clk_id) { struct timespec 
res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime(clockid_t clk_id, struct PsnipClockTimespec *res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision(void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time(struct PsnipClockTimespec *res) { (void)res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision(void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif 
defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time(struct PsnipClockTimespec *res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void)res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) - 1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void)res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision(void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == 
PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = {0,}; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time(struct PsnipClockTimespec *res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void)res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = {0,}; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; 
else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* * Returns the number of ticks per second for the specified clock. For * example, a clock with millisecond precision would return 1000, and a clock * with 1 second (such as the time() function) would return 1. * * If the requested clock isn't available, it will return 0. Hopefully this will * be rare, but if it happens to you please let us know so we can work on * finding a way to support your system. * * Note that different clocks on the same system often have a different * precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision(enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision(); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision(); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision(); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* * Set the provided timespec to the requested time. Returns 0 on success, or * a negative value on failure. 
*/ PSNIP_CLOCK__FUNCTION int psnip_clock_get_time(enum PsnipClockType clock_type, struct PsnipClockTimespec *res) { assert(res != NULL); switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_time(res); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_time(res); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_time(res); } return -1; } #endif /* !defined(PSNIP_CLOCK_H) */ static psnip_uint64_t munit_clock_get_elapsed(struct PsnipClockTimespec *start, struct PsnipClockTimespec *end) { psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC; if (end->nanoseconds < start->nanoseconds) { r -= (start->nanoseconds - end->nanoseconds); } else { r += (end->nanoseconds - start->nanoseconds); } return r; } #endif /* defined(MUNIT_ENABLE_TIMING) */ /*** PRNG stuff ***/ /* * This is (unless I screwed up, which is entirely possible) the version of * PCG with 32-bit state. It was chosen because it has a small enough state * that we should reliably be able to use CAS instead of requiring a lock for * thread-safety. * * If I did screw up, I probably will not bother changing it unless there is a * significant bias. It's really not important this be particularly strong, * as long as it is fairly random it's much more important that it be * reproducible, so bug reports have a better chance of being reproducible. 
 */
/* Detect an atomics implementation for the shared PRNG state. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__)
#define HAVE_STDATOMIC
#elif defined(__clang__)
#if __has_extension(c_atomic)
#define HAVE_CLANG_ATOMICS
#endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
#undef HAVE_STDATOMIC
#if defined(__c2__)
#undef HAVE_CLANG_ATOMICS
#endif
#endif

#if defined(_OPENMP)
#define ATOMIC_UINT32_T uint32_t
#define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
#include <stdatomic.h>
#define ATOMIC_UINT32_T _Atomic uint32_t
#define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
#define ATOMIC_UINT32_T _Atomic uint32_t
#define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
#define ATOMIC_UINT32_T volatile LONG
#define ATOMIC_UINT32_INIT(x) (x)
#else
#define ATOMIC_UINT32_T volatile uint32_t
#define ATOMIC_UINT32_INIT(x) (x)
#endif

/* Global PRNG state shared across threads; default seed is 42. */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

#if defined(_OPENMP)
/* Under OpenMP we emulate atomics with a named critical section. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T * dest, ATOMIC_UINT32_T value)
{
#pragma omp critical (munit_atomics)
	*dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T * src)
{
	int ret;
#pragma omp critical (munit_atomics)
	ret = *src;
	return ret;
}

/* Compare-and-swap: returns true (1) when *dest equalled *expected. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T * dest, ATOMIC_UINT32_T * expected, ATOMIC_UINT32_T desired)
{
	bool ret;

#pragma omp critical (munit_atomics)
	{
		if (*dest == *expected) {
			*dest = desired;
			ret = true;
		} else {
			ret = false;
		}
	}

	return ret;
}
#elif defined(HAVE_STDATOMIC)
#define munit_atomic_store(dest, value) atomic_store(dest, value)
#define munit_atomic_load(src) atomic_load(src)
#define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
#define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
#define munit_atomic_load(src)
__c11_atomic_load(src, __ATOMIC_SEQ_CST) #define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) #define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) #define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) #define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) #define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) #define munit_atomic_load(src) (*(src)) #define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ #define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) #define munit_atomic_load(src) (*(src)) #define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else #warning No atomic implementation, PRNG will not be thread-safe #define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) #define munit_atomic_load(src) (*(src)) static inline bool munit_atomic_cas(ATOMIC_UINT32_T * dest, ATOMIC_UINT32_T * expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return true; } else { return false; } } #endif #define MUNIT_PRNG_MULTIPLIER (747796405U) #define MUNIT_PRNG_INCREMENT (1729U) static munit_uint32_t munit_rand_next_state(munit_uint32_t state) { return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT; } static munit_uint32_t munit_rand_from_state(munit_uint32_t state) { munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U); res ^= res >> 22; return res; } void munit_rand_seed(munit_uint32_t seed) { munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); 
	munit_atomic_store(&munit_rand_state, state);
}

/* Derive a seed from the wall clock (nanoseconds field of the current time). */
static munit_uint32_t
munit_rand_generate_seed(void)
{
	struct PsnipClockTimespec wc = {0,};
	munit_uint32_t seed, state;

	psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
	seed = (munit_uint32_t) wc.nanoseconds;

	state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
	return munit_rand_from_state(state);
}

/* Advance *state one step and return the output derived from the old state. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t * state)
{
	const munit_uint32_t old = *state;
	*state = munit_rand_next_state(old);
	return munit_rand_from_state(old);
}

/* Thread-safe random 32-bit value: advance the global state via CAS retry. */
munit_uint32_t
munit_rand_uint32(void)
{
	munit_uint32_t old, state;

	do {
		old = munit_atomic_load(&munit_rand_state);
		state = munit_rand_next_state(old);
	} while (!munit_atomic_cas(&munit_rand_state, &old, state));

	return munit_rand_from_state(old);
}

/* Fill `data` with `size` pseudo-random bytes drawn from *state. */
static void
munit_rand_state_memory(munit_uint32_t * state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)])
{
	size_t members_remaining = size / sizeof(munit_uint32_t);
	size_t bytes_remaining = size % sizeof(munit_uint32_t);
	munit_uint8_t *b = data;
	munit_uint32_t rv;

	/* Whole 32-bit words first, then any trailing partial word. */
	while (members_remaining-- > 0) {
		rv = munit_rand_state_uint32(state);
		memcpy(b, &rv, sizeof(munit_uint32_t));
		b += sizeof(munit_uint32_t);
	}
	if (bytes_remaining != 0) {
		rv = munit_rand_state_uint32(state);
		memcpy(b, &rv, bytes_remaining);
	}
}

/* Thread-safe wrapper over munit_rand_state_memory() on the global state. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)])
{
	munit_uint32_t old, state;

	do {
		state = old = munit_atomic_load(&munit_rand_state);
		munit_rand_state_memory(&state, size, data);
	} while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

/* Unbiased random value in [0, max] via rejection sampling. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t * state, munit_uint32_t salt, munit_uint32_t max)
{
	/*
	 * We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the
	 * same as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max
	 * using not to avoid compiler warnings.
	 */
	const munit_uint32_t min = (~max + 1U) % max;
	munit_uint32_t x;

	/* Full 32-bit range requested: no rejection needed. */
	if (max == (~((munit_uint32_t) 0U)))
		return munit_rand_state_uint32(state) ^ salt;

	max++;

	/* Reject values below `min` so every residue class is equally likely. */
	do {
		x = munit_rand_state_uint32(state) ^ salt;
	} while (x < min);

	return x % max;
}

/* Thread-safe version of munit_rand_state_at_most() on the global state. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max)
{
	munit_uint32_t old, state;
	munit_uint32_t retval;

	do {
		state = old = munit_atomic_load(&munit_rand_state);
		retval = munit_rand_state_at_most(&state, salt, max);
	} while (!munit_atomic_cas(&munit_rand_state, &old, state));

	return retval;
}

/* Random int in [min, max]; arguments may be supplied in either order. */
int
munit_rand_int_range(int min, int max)
{
	munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

	if (min > max)
		return munit_rand_int_range(max, min);

	/* Clamp to what a single 32-bit draw can cover. */
	if (range > (~((munit_uint32_t) 0U)))
		range = (~((munit_uint32_t) 0U));

	return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Random double in [0, 1). */
double
munit_rand_double(void)
{
	munit_uint32_t old, state;
	double retval = 0.0;

	do {
		state = old = munit_atomic_load(&munit_rand_state);

		/*
		 * See http://mumble.net/~campbell/tmp/random_real.c for how to do
		 * this right. Patches welcome if you feel that this is too biased.
		 */
		retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
	} while (!munit_atomic_cas(&munit_rand_state, &old, state));

	return retval;
}

/*** Test suite handling ***/

/* Aggregated per-run counters (plus elapsed clocks when timing is enabled). */
typedef struct {
	unsigned int successful;
	unsigned int skipped;
	unsigned int failed;
	unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
	munit_uint64_t cpu_clock;
	munit_uint64_t wall_clock;
#endif
} MunitReport;

/* Everything needed to run a suite: configuration plus the live report. */
typedef struct {
	const char *prefix;
	const MunitSuite *suite;
	const char **tests;
	munit_uint32_t seed;
	unsigned int iterations;
	MunitParameter *parameters;
	bool single_parameter_mode;
	void *user_data;
	MunitReport report;
	bool colorize;
	bool fork;
	bool show_stderr;
	bool fatal_failures;
} MunitTestRunner;

/* Look up `key` in a NULL-terminated parameter array; NULL when absent. */
const char *
munit_parameters_get(const MunitParameter params[], const char *key)
{
	const MunitParameter *param;

	for (param = params; param != NULL && param->name != NULL; param++)
		if (strcmp(param->name, key) == 0)
			return param->value;
	return NULL;
}

/* Print a nanosecond count as seconds using MUNIT_TEST_TIME_FORMAT. */
static void
munit_print_time(FILE * fp, munit_uint64_t nanoseconds)
{
	fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double)nanoseconds) / ((double)PSNIP_CLOCK_NSEC_PER_SEC));
}

/* Add a parameter to an array of parameters, keeping a NULL terminator. */
static MunitResult
munit_parameters_add(size_t * params_size, MunitParameter * params[MUNIT_ARRAY_PARAM(*params_size)], char *name, char *value)
{
	*params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
	if (*params == NULL)
		return MUNIT_ERROR;

	(*params)[*params_size].name = name;
	(*params)[*params_size].value = value;
	(*params_size)++;
	(*params)[*params_size].name = NULL;
	(*params)[*params_size].value = NULL;

	return MUNIT_OK;
}

/*
 * Concatenate two strings, but just return one of the components unaltered
 * if the other is NULL or "".
 */
static char *
munit_maybe_concat(size_t * len, char *prefix, char *suffix)
{
	char *res;
	size_t res_l;
	const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
	const size_t suffix_l = suffix != NULL ?
	    strlen(suffix) : 0;

	if (prefix_l == 0 && suffix_l == 0) {
		res = NULL;
		res_l = 0;
	} else if (prefix_l == 0 && suffix_l != 0) {
		/* Only a suffix: return it unchanged (no allocation). */
		res = suffix;
		res_l = suffix_l;
	} else if (prefix_l != 0 && suffix_l == 0) {
		/* Only a prefix: return it unchanged (no allocation). */
		res = prefix;
		res_l = prefix_l;
	} else {
		res_l = prefix_l + suffix_l;
		res = malloc(res_l + 1);
		memcpy(res, prefix, prefix_l);
		memcpy(res + prefix_l, suffix, suffix_l);
		res[res_l] = 0;
	}

	if (len != NULL)
		*len = res_l;

	return res;
}

/* Possibly free a string returned by munit_maybe_concat. */
static void
munit_maybe_free_concat(char *s, const char *prefix, const char *suffix)
{
	/* Free only when the string was actually allocated (not an alias). */
	if (prefix != s && suffix != s)
		free(s);
}

/* Cheap string hash function, just used to salt the PRNG. */
static munit_uint32_t
munit_str_hash(const char *name)
{
	const char *p;
	munit_uint32_t h = 5381U;

	/* djb2-style: h = h * 33 + c. */
	for (p = name; *p != '\0'; p++)
		h = (h << 5) + h + *p;

	return h;
}

/* Copy everything readable from fd `from` to fd `to` (forwards a child's
 * buffered stderr to the parent's). */
static void
munit_splice(int from, int to)
{
	munit_uint8_t buf[1024];
#if !defined(_WIN32)
	ssize_t len;
	ssize_t bytes_written;
	ssize_t write_res;
#else
	int len;
	int bytes_written;
	int write_res;
#endif
	do {
		len = read(from, buf, sizeof(buf));
		if (len > 0) {
			bytes_written = 0;
			do {
				write_res = write(to, buf + bytes_written, len - bytes_written);
				if (write_res < 0)
					break;
				bytes_written += write_res;
			} while (bytes_written < len);
		} else
			break;
	} while (true);
}

/* This is the part that should be handled in the child process */
static MunitResult
munit_test_runner_exec(MunitTestRunner * runner, const MunitTest * test, const MunitParameter params[], MunitReport * report)
{
	unsigned int iterations = runner->iterations;
	MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
	struct PsnipClockTimespec wall_clock_begin = {0,}, wall_clock_end = {0,};
	struct PsnipClockTimespec cpu_clock_begin = {0,}, cpu_clock_end = {0,};
#endif
	unsigned int i = 0;

	/* Single-iteration tests always run once; 0 means "use suite default". */
	if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
		iterations = 1;
	else if (iterations == 0)
		iterations = runner->suite->iterations;
munit_rand_seed(runner->seed); do { void *data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int)result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) #define MUNIT_RESULT_STRING_OK ":)" #define MUNIT_RESULT_STRING_SKIP ":|" #define MUNIT_RESULT_STRING_FAIL ":(" #define MUNIT_RESULT_STRING_ERROR ":o" #define MUNIT_RESULT_STRING_TODO ":/" #else #define MUNIT_RESULT_STRING_OK "OK " #define MUNIT_RESULT_STRING_SKIP "SKIP " #define MUNIT_RESULT_STRING_FAIL "FAIL " #define MUNIT_RESULT_STRING_ERROR "ERROR" #define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner * runner, const char *string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE * stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return 
orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner * runner, const MunitTest * test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; bool first; const MunitParameter *param; FILE *stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = true; for (param = params; param != NULL && param->name != NULL; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = false; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* * Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by asan/tsan/ubsan, * valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t *) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t *) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = true; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* * Here just so that the label is used on Windows and we don't get a * warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner * runner, const MunitTest * test, const char *test_name, MunitParameter * params, MunitParameter * p) { const MunitParameterEnum *pe; char **values; MunitParameter *next; for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values; *values != NULL; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* * Run a single test, with every combination of parameters requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner * runner, const MunitTest * test, const char *prefix) { char *test_name = munit_maybe_concat(NULL, (char *)prefix, (char *)test->name); /* * The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter *params = NULL; size_t params_l = 0; /* * Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the CLI. * That means we want to run the test once for every possible combination * of parameter values or, if --single was passed to the CLI, a single * time with a random set of parameters. */ MunitParameter *wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum *pe; const MunitParameter *cli_p; bool filled; unsigned int possible; char **vals; size_t first_wild; const MunitParameter *wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = false; for (cli_p = runner->parameters; cli_p != NULL && cli_p->name != NULL; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = true; break; } } if (filled) continue; /* * Nothing from CLI, is the enum NULL/empty? We're not a fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* * If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values; *vals != NULL; vals++) possible++; /* * We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* * We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params; wp != NULL && wp->name != NULL; wp++) { for (pe = test->parameters; pe != NULL && pe->name != NULL && pe->values != NULL; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* * Recurse through the suite and run all the tests. If a list of tests to * run was provied on the command line, run only those tests. */ static void munit_test_runner_run_suite(MunitTestRunner * runner, const MunitSuite * suite, const char *prefix) { size_t pre_l; char *pre = munit_maybe_concat(&pre_l, (char *)prefix, (char *)suite->prefix); const MunitTest *test; const char **test_name; const MunitSuite *child_suite; /* Run the tests. 
*/ for (test = suite->tests; test != NULL && test->test != NULL; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests; test_name != NULL && *test_name != NULL; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner * runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)], void *user_data, const MunitArgument arguments[]) { const MunitArgument *arg; (void)argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* * 12345678901234567890123456789012345678901234567890123456789012345678901 * 234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments; arg != NULL && arg->name != NULL; arg++) arg->write_help(arg, user_data); } static const MunitArgument * munit_arguments_find(const MunitArgument arguments[], const char *name) { const MunitArgument *arg; for (arg = arguments; arg != NULL && arg->name != NULL; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite * suite, bool show_params, const char *prefix) { size_t pre_l; char *pre = munit_maybe_concat(&pre_l, (char *)prefix, (char *)suite->prefix); const MunitTest *test; const MunitParameterEnum *params; bool first; char **val; const MunitSuite *child_suite; for (test = suite->tests; test != NULL && test->name != NULL; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters; params != NULL && params->name != NULL; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = true; for (val = params->values; *val != NULL; val++) { if (!first) { fputs(", ", stdout); } else { first = false; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static bool munit_stream_supports_ansi(FILE * stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return false; #endif } int 
munit_suite_main_custom(const MunitSuite * suite, void *user_data, int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char *envptr; unsigned long ts; char *endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument *argument; const char **runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = false; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = false; #if !defined(_WIN32) runner.fork = true; #else runner.fork = false; #endif runner.show_stderr = false; runner.fatal_failures = false; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1; arg < argc; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != '\0' || 
iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int)iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char *)argv[arg + 1]; runner.parameters[parameters_size].value = (char *)argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = true; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = false; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = true; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = true; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = false; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = true; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", argv[arg] + 
2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, false, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, true, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if (!argument->parse_argument(suite, user_data, &arg, argc, argv)) goto cleanup; } } else { runner_tests = realloc((void *)runner.tests, sizeof(char *) * (tests_size + 2)); if (runner_tests == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.tests = runner_tests; runner.tests[tests_size++] = argv[arg]; runner.tests[tests_size] = NULL; } } fflush(stderr); fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed); munit_test_runner_run(&runner); tests_run = runner.report.successful + runner.report.failed + runner.report.errored; tests_total = tests_run + runner.report.skipped; if (tests_run == 0) { fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped); } else { fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test 
skipped.\n", runner.report.successful, tests_run, (((double)runner.report.successful) / ((double)tests_run)) * 100.0, runner.report.skipped, (((double)runner.report.skipped) / ((double)tests_total)) * 100.0); } if (runner.report.failed == 0 && runner.report.errored == 0) { result = EXIT_SUCCESS; } cleanup: free(runner.parameters); free((void *)runner.tests); return result; } int munit_suite_main(const MunitSuite * suite, void *user_data, int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)]) { return munit_suite_main_custom(suite, user_data, argc, argv, NULL); }
rose_circuit_OpenMP.c
#include <omp.h> # include <stdlib.h> # include <stdio.h> # include <time.h> int main(int argc,char *argv[]); int circuit_value(int n,int bvec[]); void i4_to_bvec(int i4,int n,int bvec[]); void timestamp(); /******************************************************************************/ int main(int argc,char *argv[]) /******************************************************************************/ /* Purpose: MAIN is the main program for SATISFY. Licensing: This code is distributed under the GNU LGPL license. Modified: 20 March 2009 Author: John Burkardt Reference: Michael Quinn, Parallel Programming in C with MPI and OpenMP, McGraw-Hill, 2004, ISBN13: 978-0071232654, LC: QA76.73.C15.Q55. */ { # define N 23 int bvec[23UL]; int i; int ihi; int j; int n = 23; int solution_num; int value; printf("\n"); timestamp(); printf("\n"); printf("SATISFY\n"); printf(" C version\n"); printf(" We have a logical function of N logical arguments.\n"); printf(" We do an exhaustive search of all 2^N possibilities,\n"); printf(" seeking those inputs that make the function TRUE.\n"); /* Compute the number of binary vectors to check. */ ihi = 1; for (i = 1; i <= n; i++) { ihi = (ihi * 2); } printf(" The number of logical variables is N = %d\n",n); printf(" The number of input vectors to check is %d\n",ihi); printf("\n"); printf(" # Index ---------Input Values------------------------\n"); printf("\n"); /* Check every possible input vector. */ solution_num = 0; #pragma omp parallel default(none) shared(solution_num,ihi,n) private(i,value,j) firstprivate(bvec) { #pragma omp for reduction ( + :solution_num) for (i = 0; i < ihi; i++) { i4_to_bvec(i,n,bvec); value = circuit_value(n,bvec); if (value == 1) { solution_num = (solution_num + 1); printf(" %2d %10d: ",solution_num,i); for (j = 0; j < n; j++) { printf(" %d",bvec[j]); } printf("\n"); } } } // Report. printf("\n"); printf(" Number of solutions found was %d\n",solution_num); /* Shut down. 
*/ printf("\n"); printf("SATISFY\n"); printf(" Normal end of execution.\n"); printf("\n"); timestamp(); return 0; # undef N } /******************************************************************************/ int circuit_value(int n,int bvec[]) /******************************************************************************/ /* Purpose: CIRCUIT_VALUE returns the value of a circuit for a given input set. Licensing: This code is distributed under the GNU LGPL license. Modified: 20 March 2009 Author: John Burkardt Reference: Michael Quinn, Parallel Programming in C with MPI and OpenMP, McGraw-Hill, 2004, ISBN13: 978-0071232654, LC: QA76.73.C15.Q55. Parameters: Input, int N, the length of the input vector. Input, int BVEC[N], the binary inputs. Output, int CIRCUIT_VALUE, the output of the circuit. */ { int value; value = (((((((((((((((((((((((((((((((bvec[0] != 0) || (bvec[1] != 0)) && (!(bvec[1] != 0) || !(bvec[3] != 0))) && ((bvec[2] != 0) || (bvec[3] != 0))) && (!(bvec[3] != 0) || !(bvec[4] != 0))) && ((bvec[4] != 0) || !(bvec[5] != 0))) && ((bvec[5] != 0) || !(bvec[6] != 0))) && ((bvec[5] != 0) || (bvec[6] != 0))) && ((bvec[6] != 0) || !(bvec[15] != 0))) && ((bvec[7] != 0) || !(bvec[8] != 0))) && (!(bvec[7] != 0) || !(bvec[13] != 0))) && ((bvec[8] != 0) || (bvec[9] != 0))) && ((bvec[8] != 0) || !(bvec[9] != 0))) && (!(bvec[9] != 0) || !(bvec[10] != 0))) && ((bvec[9] != 0) || (bvec[11] != 0))) && ((bvec[10] != 0) || (bvec[11] != 0))) && ((bvec[12] != 0) || (bvec[13] != 0))) && ((bvec[13] != 0) || !(bvec[14] != 0))) && ((bvec[14] != 0) || (bvec[15] != 0))) && ((bvec[14] != 0) || (bvec[16] != 0))) && ((bvec[17] != 0) || (bvec[1] != 0))) && ((bvec[18] != 0) || !(bvec[0] != 0))) && ((bvec[19] != 0) || (bvec[1] != 0))) && ((bvec[19] != 0) || !(bvec[18] != 0))) && (!(bvec[19] != 0) || !(bvec[9] != 0))) && ((bvec[0] != 0) || (bvec[17] != 0))) && (!(bvec[1] != 0) || (bvec[20] != 0))) && (!(bvec[21] != 0) || (bvec[20] != 0))) && (!(bvec[22] != 0) || (bvec[20] != 0))) && 
(!(bvec[21] != 0) || !(bvec[20] != 0))) && ((bvec[22] != 0) || !(bvec[20] != 0))); return value; } /******************************************************************************/ void i4_to_bvec(int i4,int n,int bvec[]) /******************************************************************************/ /* Purpose: I4_TO_BVEC converts an integer into a binary vector. Licensing: This code is distributed under the GNU LGPL license. Modified: 20 March 2009 Author: John Burkardt Parameters: Input, int I4, the integer. Input, int N, the dimension of the vector. Output, int BVEC[N], the vector of binary remainders. */ { int i; for (i = (n - 1); 0 <= i; i--) { bvec[i] = (i4 % 2); i4 = (i4 / 2); } } /******************************************************************************/ void timestamp() /******************************************************************************/ /* Purpose: TIMESTAMP prints the current YMDHMS date as a time stamp. Example: 31 May 2001 09:45:54 AM Licensing: This code is distributed under the GNU LGPL license. Modified: 24 September 2003 Author: John Burkardt Parameters: None */ { # define TIME_SIZE 40 static char time_buffer[40UL]; const struct tm *tm; size_t len; time_t now; now = time(0); tm = (localtime((&now))); len = strftime(time_buffer,40,"%d %B %Y %I:%M:%S %p",tm); printf("%s\n",time_buffer); # undef TIME_SIZE }
#include <omp.h> #include <stdlib.h> #include <stdio.h> #include <time.h> int main(int argc, char *argv[]); int circuit_value(int n, int bvec[]); void i4_to_bvec(int i4, int n, int bvec[]); void timestamp(); /******************************************************************************/ int main(int argc, char *argv[]) /******************************************************************************/ /* * Purpose: MAIN is the main program for SATISFY. Licensing: This code is * distributed under the GNU LGPL license. Modified: 20 March 2009 Author: * John Burkardt Reference: Michael Quinn, Parallel Programming in C with MPI * and OpenMP, McGraw-Hill, 2004, ISBN13: 978-0071232654, LC: * QA76.73.C15.Q55. */ { #define N 23 int bvec[23UL]; int i; int ihi; int j; int n = 23; int solution_num; int value; printf("\n"); timestamp(); printf("\n"); printf("SATISFY\n"); printf(" C version\n"); printf(" We have a logical function of N logical arguments.\n"); printf(" We do an exhaustive search of all 2^N possibilities,\n"); printf(" seeking those inputs that make the function TRUE.\n"); /* * Compute the number of binary vectors to check. */ ihi = 1; for (i = 1; i <= n; i++) { ihi = (ihi * 2); } printf(" The number of logical variables is N = %d\n", n); printf(" The number of input vectors to check is %d\n", ihi); printf("\n"); printf(" # Index ---------Input Values------------------------\n"); printf("\n"); /* * Check every possible input vector. */ solution_num = 0; for (i = 0; i < ihi; i++) { i4_to_bvec(i, n, bvec); value = circuit_value(n, bvec); if (value == 1) { solution_num = (solution_num + 1); printf(" %2d %10d: ", solution_num, i); for (j = 0; j < n; j++) { printf(" %d", bvec[j]); } printf("\n"); } } //Report. printf("\n"); printf(" Number of solutions found was %d\n", solution_num); /* * Shut down. 
*/ printf("\n"); printf("SATISFY\n"); printf(" Normal end of execution.\n"); printf("\n"); timestamp(); return 0; #undef N } /******************************************************************************/ int circuit_value(int n, int bvec[]) /******************************************************************************/ /* * Purpose: CIRCUIT_VALUE returns the value of a circuit for a given input * set. Licensing: This code is distributed under the GNU LGPL license. * Modified: 20 March 2009 Author: John Burkardt Reference: Michael Quinn, * Parallel Programming in C with MPI and OpenMP, McGraw-Hill, 2004, ISBN13: * 978-0071232654, LC: QA76.73.C15.Q55. Parameters: Input, int N, the length * of the input vector. Input, int BVEC[N], the binary inputs. Output, int * CIRCUIT_VALUE, the output of the circuit. */ { int value; value = (((((((((((((((((((((((((((((((bvec[0] != 0) || (bvec[1] != 0)) && (!(bvec[1] != 0) || !(bvec[3] != 0))) && ((bvec[2] != 0) || (bvec[3] != 0))) && (!(bvec[3] != 0) || !(bvec[4] != 0))) && ((bvec[4] != 0) || !(bvec[5] != 0))) && ((bvec[5] != 0) || !(bvec[6] != 0))) && ((bvec[5] != 0) || (bvec[6] != 0))) && ((bvec[6] != 0) || !(bvec[15] != 0))) && ((bvec[7] != 0) || !(bvec[8] != 0))) && (!(bvec[7] != 0) || !(bvec[13] != 0))) && ((bvec[8] != 0) || (bvec[9] != 0))) && ((bvec[8] != 0) || !(bvec[9] != 0))) && (!(bvec[9] != 0) || !(bvec[10] != 0))) && ((bvec[9] != 0) || (bvec[11] != 0))) && ((bvec[10] != 0) || (bvec[11] != 0))) && ((bvec[12] != 0) || (bvec[13] != 0))) && ((bvec[13] != 0) || !(bvec[14] != 0))) && ((bvec[14] != 0) || (bvec[15] != 0))) && ((bvec[14] != 0) || (bvec[16] != 0))) && ((bvec[17] != 0) || (bvec[1] != 0))) && ((bvec[18] != 0) || !(bvec[0] != 0))) && ((bvec[19] != 0) || (bvec[1] != 0))) && ((bvec[19] != 0) || !(bvec[18] != 0))) && (!(bvec[19] != 0) || !(bvec[9] != 0))) && ((bvec[0] != 0) || (bvec[17] != 0))) && (!(bvec[1] != 0) || (bvec[20] != 0))) && (!(bvec[21] != 0) || (bvec[20] != 0))) && (!(bvec[22] != 0) || (bvec[20] != 
0))) && (!(bvec[21] != 0) || !(bvec[20] != 0))) && ((bvec[22] != 0) || !(bvec[20] != 0))); return value; } /******************************************************************************/ void i4_to_bvec(int i4, int n, int bvec[]) /******************************************************************************/ /* * Purpose: I4_TO_BVEC converts an integer into a binary vector. Licensing: * This code is distributed under the GNU LGPL license. Modified: 20 March * 2009 Author: John Burkardt Parameters: Input, int I4, the integer. Input, * int N, the dimension of the vector. Output, int BVEC[N], the vector of * binary remainders. */ { int i; for (i = (n - 1); 0 <= i; i--) { bvec[i] = (i4 % 2); i4 = (i4 / 2); } } /******************************************************************************/ void timestamp() /******************************************************************************/ /* * Purpose: TIMESTAMP prints the current YMDHMS date as a time stamp. * Example: 31 May 2001 09:45:54 AM Licensing: This code is distributed under * the GNU LGPL license. Modified: 24 September 2003 Author: John Burkardt * Parameters: None */ { #define TIME_SIZE 40 static char time_buffer[40UL]; const struct tm *tm; size_t len; time_t now; now = time(0); tm = (localtime((&now))); len = strftime(time_buffer, 40, "%d %B %Y %I:%M:%S %p", tm); printf("%s\n", time_buffer); #undef TIME_SIZE }
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

int main(int argc, char *argv[]);
int circuit_value(int n, int bvec[]);
void i4_to_bvec(int i4, int n, int bvec[]);
void timestamp(void);

/******************************************************************************/

int main(int argc, char *argv[])

/******************************************************************************/
/*
  Purpose:

    MAIN is the main program for SATISFY.

    The program evaluates a fixed boolean circuit of N inputs over all
    2^N possible input vectors and reports every vector that makes the
    circuit TRUE.  The exhaustive search loop is parallelized with
    OpenMP.

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    20 March 2009

  Author:

    John Burkardt

  Reference:

    Michael Quinn,
    Parallel Programming in C with MPI and OpenMP,
    McGraw-Hill, 2004,
    ISBN13: 978-0071232654,
    LC: QA76.73.C15.Q55.
*/
{
#define N 23
  int bvec[N];
  int i;
  int ihi;
  int j;
  int n = N;
  int solution_num;
  int value;

  (void)argc;  /* command line is unused */
  (void)argv;

  printf("\n");
  timestamp();
  printf("\n");
  printf("SATISFY\n");
  printf(" C version\n");
  printf(" We have a logical function of N logical arguments.\n");
  printf(" We do an exhaustive search of all 2^N possibilities,\n");
  printf(" seeking those inputs that make the function TRUE.\n");
/*
  Compute the number of binary vectors to check (2^N, N = 23 fits in int).
*/
  ihi = 1;
  for (i = 1; i <= n; i++)
  {
    ihi = ihi * 2;
  }
  printf(" The number of logical variables is N = %d\n", n);
  printf(" The number of input vectors to check is %d\n", ihi);
  printf("\n");
  printf(" # Index ---------Input Values------------------------\n");
  printf("\n");
/*
  Check every possible input vector.

  BUG FIX: the original loop combined reduction(+:solution_num) with a
  printf of solution_num inside the loop body.  Under a reduction each
  thread holds a private partial count, so the printed solution indices
  were wrong whenever more than one thread ran.  The counter update and
  the report are now performed together inside a critical section, so
  each printed index matches the global running count (the order of the
  report lines is still nondeterministic).
*/
  solution_num = 0;

#pragma omp parallel for default(none) shared(solution_num, ihi, n) private(i, value, j) firstprivate(bvec)
  for (i = 0; i < ihi; i++)
  {
    i4_to_bvec(i, n, bvec);
    value = circuit_value(n, bvec);
    if (value == 1)
    {
#pragma omp critical
      {
        solution_num = solution_num + 1;
        printf(" %2d %10d: ", solution_num, i);
        for (j = 0; j < n; j++)
        {
          printf(" %d", bvec[j]);
        }
        printf("\n");
      }
    }
  }
/*
  Report.
*/
  printf("\n");
  printf(" Number of solutions found was %d\n", solution_num);
/*
  Shut down.
*/
  printf("\n");
  printf("SATISFY\n");
  printf(" Normal end of execution.\n");
  printf("\n");
  timestamp();

  return 0;
#undef N
}
/******************************************************************************/

int circuit_value(int n, int bvec[])

/******************************************************************************/
/*
  Purpose:

    CIRCUIT_VALUE returns the value of a circuit for a given input set.

    The circuit is a fixed CNF formula: a conjunction of two-literal
    clauses over the binary inputs bvec[0]..bvec[22].  The result is 1
    exactly when every clause is satisfied.

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    20 March 2009

  Author:

    John Burkardt

  Reference:

    Michael Quinn,
    Parallel Programming in C with MPI and OpenMP,
    McGraw-Hill, 2004,
    ISBN13: 978-0071232654,
    LC: QA76.73.C15.Q55.

  Parameters:

    Input, int N, the length of the input vector; the formula indexes
    bvec[0]..bvec[22] directly, so at least 23 entries are required.
    N itself is not otherwise used.

    Input, int BVEC[N], the binary inputs.

    Output, int CIRCUIT_VALUE, the output of the circuit (0 or 1).
*/
{
  int value;

  (void)n;  /* the formula is fixed over 23 inputs */

/* B(k) is the truth value of input k; one CNF clause per line. */
#define B(k) (bvec[k] != 0)
  value =
       ( B(0)  ||  B(1))
    && (!B(1)  || !B(3))
    && ( B(2)  ||  B(3))
    && (!B(3)  || !B(4))
    && ( B(4)  || !B(5))
    && ( B(5)  || !B(6))
    && ( B(5)  ||  B(6))
    && ( B(6)  || !B(15))
    && ( B(7)  || !B(8))
    && (!B(7)  || !B(13))
    && ( B(8)  ||  B(9))
    && ( B(8)  || !B(9))
    && (!B(9)  || !B(10))
    && ( B(9)  ||  B(11))
    && ( B(10) ||  B(11))
    && ( B(12) ||  B(13))
    && ( B(13) || !B(14))
    && ( B(14) ||  B(15))
    && ( B(14) ||  B(16))
    && ( B(17) ||  B(1))
    && ( B(18) || !B(0))
    && ( B(19) ||  B(1))
    && ( B(19) || !B(18))
    && (!B(19) || !B(9))
    && ( B(0)  ||  B(17))
    && (!B(1)  ||  B(20))
    && (!B(21) ||  B(20))
    && (!B(22) ||  B(20))
    && (!B(21) || !B(20))
    && ( B(22) || !B(20));
#undef B

  return value;
}
/******************************************************************************/

void i4_to_bvec(int i4, int n, int bvec[])

/******************************************************************************/
/*
  Purpose:

    I4_TO_BVEC converts an integer into a binary vector.

    Bits are stored most-significant first: bvec[n-1] receives the
    lowest-order bit of I4.

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    20 March 2009

  Author:

    John Burkardt

  Parameters:

    Input, int I4, the integer (expected non-negative).

    Input, int N, the dimension of the vector.

    Output, int BVEC[N], the vector of binary remainders.
*/
{
  int i;

  /* Peel bits off I4 from least significant upward, filling the array
     from the back toward the front. */
  for (i = n - 1; 0 <= i; i--)
  {
    bvec[i] = i4 % 2;
    i4 = i4 / 2;
  }
}
/******************************************************************************/

void timestamp(void)

/******************************************************************************/
/*
  Purpose:

    TIMESTAMP prints the current YMDHMS date as a time stamp.

  Example:

    31 May 2001 09:45:54 AM

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    24 September 2003

  Author:

    John Burkardt

  Parameters:

    None
*/
{
#define TIME_SIZE 40

  static char time_buffer[TIME_SIZE];
  const struct tm *tm;
  time_t now;

  now = time(NULL);
  tm = localtime(&now);

  /* strftime returns 0 when the buffer is too small; guard so we never
     print an unterminated buffer.  (The original stored this length in
     an unused local 'len'.) */
  if (strftime(time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm) == 0)
  {
    time_buffer[0] = '\0';
  }

  printf("%s\n", time_buffer);

#undef TIME_SIZE
}
convolution_sgemm.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack the convolution weights for the im2col+sgemm path below.
// Weights are interleaved element-by-element in blocks of 8 output
// channels (aarch64 only), then blocks of 4, then single leftover
// channels, so the GEMM inner loop can load one contiguous run per step.
static void conv_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);
#endif

    int nn_outch = 0;
    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    // Blocks of 8 output channels.
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;
        const float* k4 = kernel + (p+4)*inch*kernel_size;
        const float* k5 = kernel + (p+5)*inch*kernel_size;
        const float* k6 = kernel + (p+6)*inch*kernel_size;
        const float* k7 = kernel + (p+7)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch*kernel_size; q++)
        {
            // Interleave one weight from each of the 8 channels.
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
#endif

    // Remaining blocks of 4 output channels.
    nn_outch = (outch - remain_outch_start) >> 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        float* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            // Interleave one weight from each of the 4 channels.
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;

    // Leftover single output channels are copied through unpacked.
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        float* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// Convolution computed as im2col followed by a packed sgemm.
static void conv_im2col_sgemm_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col: unfold each input channel into kernel_h*kernel_w rows of
    // outw*outh samples; channels are processed in parallel.
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
} } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "cc", "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0] \n" "vst1.f32 {d0-d3}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __ARM_NEON tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* 
output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v0.4s, v1.4s}, [%21] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n"// sum4 "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n"// sum5 "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n"// sum6 "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n"// sum7 "dup v31.4s, v1.s[3] \n" "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // kernel "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla 
v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v2.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v2.s[0] \n"// "fmla v18.4s, v10.4s, v2.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v2.s[1] \n"// "fmla v20.4s, v10.4s, v2.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v2.s[2] \n"// "fmla v22.4s, v10.4s, v2.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v2.s[3] \n"// "fmla v24.4s, v10.4s, v3.s[0] \n"// sum4 += (a01-a71) * k41 "fmla v25.4s, v11.4s, v3.s[0] \n"// "fmla v26.4s, v10.4s, v3.s[1] \n"// sum5 += (a01-a71) * k51 "fmla v27.4s, v11.4s, v3.s[1] \n"// "fmla v28.4s, v10.4s, v3.s[2] \n"// sum6 += (a01-a71) * k61 "fmla v29.4s, v11.4s, v3.s[2] \n"// "fmla v30.4s, v10.4s, v3.s[3] \n"// sum7 += (a01-a71) * k71 "fmla v31.4s, v11.4s, v3.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v4.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v4.s[0] \n"// "fmla v18.4s, v12.4s, v4.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v4.s[1] \n"// "fmla v20.4s, v12.4s, v4.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v4.s[2] \n"// "fmla v22.4s, v12.4s, v4.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v4.s[3] \n"// "fmla v24.4s, v12.4s, v5.s[0] \n"// sum4 += (a02-a72) * k42 "fmla v25.4s, v13.4s, v5.s[0] \n"// "fmla v26.4s, v12.4s, v5.s[1] \n"// sum5 += (a02-a72) * k52 "fmla v27.4s, v13.4s, v5.s[1] \n"// "fmla v28.4s, v12.4s, v5.s[2] \n"// sum6 += (a02-a72) * k62 "fmla v29.4s, v13.4s, v5.s[2] \n"// "fmla v30.4s, v12.4s, v5.s[3] \n"// sum7 += (a02-a72) * k72 "fmla v31.4s, v13.4s, v5.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v6.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v6.s[0] \n"// "fmla v18.4s, v14.4s, v6.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v6.s[1] \n"// "fmla v20.4s, v14.4s, v6.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v6.s[2] \n"// "fmla v22.4s, v14.4s, v6.s[3] 
\n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v6.s[3] \n"// "fmla v24.4s, v14.4s, v7.s[0] \n"// sum4 += (a03-a73) * k43 "fmla v25.4s, v15.4s, v7.s[0] \n"// "fmla v26.4s, v14.4s, v7.s[1] \n"// sum5 += (a03-a73) * k53 "fmla v27.4s, v15.4s, v7.s[1] \n"// "fmla v28.4s, v14.4s, v7.s[2] \n"// sum6 += (a03-a73) * k63 "fmla v29.4s, v15.4s, v7.s[2] \n"// "fmla v30.4s, v14.4s, v7.s[3] \n"// sum7 += (a03-a73) * k73 "fmla v31.4s, v15.4s, v7.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" "st1 {v24.4s, v25.4s}, [%4] \n" "st1 {v26.4s, v27.4s}, [%5] \n" "st1 {v28.4s, v29.4s}, [%6] \n" "st1 {v30.4s, v31.4s}, [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), 
"1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] * vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; 
sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v14.4s, v15.4s}, [%21] \n" // sum0_7 inital with bias "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // k "ld1 
{v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" // d // k0 "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k70) * a00 "fmla v17.4s, v1.4s, v8.s[0] \n"// "fmla v18.4s, v2.4s, v8.s[1] \n"// sum1 += (k01-k71) * a10 "fmla v19.4s, v3.4s, v8.s[1] \n"// "fmla v20.4s, v4.4s, v8.s[2] \n"// sum2 += (k02-k72) * a20 "fmla v21.4s, v5.4s, v8.s[2] \n"// "fmla v22.4s, v6.4s, v8.s[3] \n"// sum3 += (k03-k73) * a30 "fmla v23.4s, v7.4s, v8.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v14.4s, v14.4s, v16.4s \n" "fadd v15.4s, v15.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k70) * a00 "fmla v15.4s, v8.4s, v1.4s \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" "st1 {v15.s}[0], [%4] \n" "st1 {v15.s}[1], [%5] \n" "st1 {v15.s}[2], [%6] \n" "st1 {v15.s}[3], [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; 
float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __ARM_NEON output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%13] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // kernel "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v1.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v1.s[0] \n"// "fmla v18.4s, v10.4s, v1.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v1.s[1] \n"// "fmla v20.4s, v10.4s, v1.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v1.s[2] \n"// "fmla v22.4s, v10.4s, v1.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v1.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v2.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v2.s[0] \n"// "fmla v18.4s, v12.4s, v2.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v2.s[1] \n"// "fmla v20.4s, v12.4s, v2.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v2.s[2] \n"// "fmla v22.4s, v12.4s, 
v2.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v2.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v3.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v3.s[0] \n"// "fmla v18.4s, v14.4s, v3.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v3.s[1] \n"// "fmla v20.4s, v14.4s, v3.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v3.s[2] \n"// "fmla v22.4s, v14.4s, v3.s[3] \n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v3.s[3] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%13] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// 
for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 = (a00-a07) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 = (a00-a07) * k30 "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n"// sum0 += (a10-a17) * k01 "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n"// sum1 += (a10-a17) * k11 "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n"// sum2 += (a10-a17) * k21 "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n"// sum3 += (a10-a17) * k31 "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d4[0] \n"// sum0 += (a20-a27) * k02 "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n"// sum1 += (a20-a27) * k12 "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n"// sum2 += (a20-a27) * k22 "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n"// sum3 += (a20-a27) * k32 "vmla.f32 q15, q5, d5[1] \n" "vmla.f32 q8, q6, d6[0] \n"// sum0 += (a30-a37) * k03 "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n"// sum1 += (a30-a37) * k13 "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n"// sum2 += (a30-a37) * k23 "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n"// sum3 += (a30-a37) * k33 "vmla.f32 q15, q7, d7[1] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4]! 
\n" "vmla.f32 q8, q4, d0[0] \n"// sum0 += (a00-a70) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 += (a00-a70) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 += (a00-a70) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 += (a00-a70) * k30 "vmla.f32 q15, q5, d1[1] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d16-d19}, [%0] \n" "vst1.f32 {d20-d23}, [%1] \n" "vst1.f32 {d24-d27}, [%2] \n" "vst1.f32 {d28-d31}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; 
sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v14.4s}, [%13] \n" // sum0_3 inital with bias "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // k "prfm pldl1keep, [%4, #128] \n" "ld1 {v8.4s}, [%4], #16 \n" // d "subs w4, w4, #1 \n" "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k30) * a00 "fmla v17.4s, v1.4s, v8.s[1] \n"// sum1 += (k01-k31) * a10 "fmla v18.4s, v2.4s, v8.s[2] \n"// sum2 += (k02-k32) * a20 "fmla v19.4s, v3.4s, v8.s[3] \n"// sum3 += (k03-k33) * a30 "bne 0b \n" "add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v14.4s, v14.4s, v16.4s \n" "add v14.4s, v14.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v8.4s}, [%4], #4 \n" "subs w4, w4, #1 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k30) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], 
[%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "vld1.f32 {d24-d25}, [%13] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n"// for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n"// data "vmla.f32 q8, q0, d8[0] \n"// (k00-k30) * a00 "vmla.f32 q9, q1, d8[1] \n"// (k01-k31) * a01 "vmla.f32 q10, q2, d9[0] \n"// (k02-k32) * a02 "vmla.f32 q11, q3, d9[1] \n"// (k03-k33) * a03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q12, q0, q4 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d24[0]}, [%0] \n" "vst1.f32 {d24[1]}, [%1] \n" "vst1.f32 {d25[0]}, [%2] \n" "vst1.f32 {d25[1]}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __ARM_NEON output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v16.4s, %w7 \n" // sum0 "dup v17.4s, %w7 \n" // sum0n "lsr w4, %w6, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// // k1 "fmla v16.4s, v10.4s, v0.s[1] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v0.s[1] \n"// // k2 "fmla v16.4s, v12.4s, v0.s[2] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v0.s[2] \n"// // k3 "fmla v16.4s, v14.4s, v0.s[3] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v0.4s, v8.4s \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v0.4s, v9.4s \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( "vdup.f32 q8, %7 \n" "vdup.f32 q9, %7 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" "pld [%2, #128] \n" "vld1.f32 
{d0-d1}, [%2]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __ARM_NEON output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ int k=0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; k+3<L; k+=4) { float32x4_t _p0 = vld1q_f32(vb); vb += 4; float32x4_t _k0 = vld1q_f32(va); va += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = 
vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } }
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack convolution weights into the interleaved layout consumed by the sgemm
// compute kernels in this file.  Output channels are grouped 8 at a time
// (aarch64 builds only), then 4 at a time, then one at a time; within a group
// the weights of the grouped output channels are interleaved element by
// element, so the GEMM inner loop can fetch one weight from each output
// channel with a single contiguous load.
//
// _kernel     : source weights, laid out as outch consecutive planes of
//               inch*kernel_size floats each (see the k0..k7 base pointers)
// kernel_tm   : destination Mat, allocated here; one channel per pack group
// inch, outch : input / output channel counts
// kernel_size : number of taps per 2-D kernel (kernel_w * kernel_h)
static void conv_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8
    // channel count: one channel per 8-group, plus one per 4-group, plus one
    // per leftover single output channel
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);
#endif

    int nn_outch = 0;            // number of full groups at the current pack width
    int remain_outch_start = 0;  // first output channel not yet packed

#if __ARM_NEON && __aarch64__
    // --- pack output channels 8 at a time (aarch64 kernels consume 8-wide blocks) ---
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        // base of each of the 8 output channels' weight planes
        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;
        const float* k4 = kernel + (p+4)*inch*kernel_size;
        const float* k5 = kernel + (p+5)*inch*kernel_size;
        const float* k6 = kernel + (p+6)*inch*kernel_size;
        const float* k7 = kernel + (p+7)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8);

        // interleave: for every weight index, emit one value from each of the
        // 8 output channels back to back
        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
#endif

    // --- pack remaining output channels 4 at a time ---
    nn_outch = (outch - remain_outch_start) >> 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

        // destination channel index skips past the 8-wide groups on aarch64
#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        float* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        // interleave 4 output channels element by element
        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;

    // --- pack leftover output channels one at a time (plain copy) ---
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        float* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + sgemm convolution.  First expands the input into a column matrix
// (one row of outw*outh values per (input channel, kernel tap) pair), then the
// code that follows repacks it and multiplies by the transformed kernel.
static void conv_im2col_sgemm_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col: bottom_im2col has kernel_h*kernel_w*inch rows of outw*outh floats
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        // each input channel contributes kernel_h*kernel_w consecutive rows
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            // (row, col) of the input sample that kernel tap
                            // (u, v) reads for output position (i, j)
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int
out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "cc", "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0] \n" "vst1.f32 {d0-d3}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __ARM_NEON tmpptr += 8; img0 += out_size; } } for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = 
top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v0.4s, v1.4s}, [%21] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n"// sum4 "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n"// sum5 "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n"// sum6 "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n"// sum7 "dup v31.4s, v1.s[3] \n" "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // kernel "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v2.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, 
v2.s[0] \n"// "fmla v18.4s, v10.4s, v2.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v2.s[1] \n"// "fmla v20.4s, v10.4s, v2.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v2.s[2] \n"// "fmla v22.4s, v10.4s, v2.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v2.s[3] \n"// "fmla v24.4s, v10.4s, v3.s[0] \n"// sum4 += (a01-a71) * k41 "fmla v25.4s, v11.4s, v3.s[0] \n"// "fmla v26.4s, v10.4s, v3.s[1] \n"// sum5 += (a01-a71) * k51 "fmla v27.4s, v11.4s, v3.s[1] \n"// "fmla v28.4s, v10.4s, v3.s[2] \n"// sum6 += (a01-a71) * k61 "fmla v29.4s, v11.4s, v3.s[2] \n"// "fmla v30.4s, v10.4s, v3.s[3] \n"// sum7 += (a01-a71) * k71 "fmla v31.4s, v11.4s, v3.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v4.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v4.s[0] \n"// "fmla v18.4s, v12.4s, v4.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v4.s[1] \n"// "fmla v20.4s, v12.4s, v4.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v4.s[2] \n"// "fmla v22.4s, v12.4s, v4.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v4.s[3] \n"// "fmla v24.4s, v12.4s, v5.s[0] \n"// sum4 += (a02-a72) * k42 "fmla v25.4s, v13.4s, v5.s[0] \n"// "fmla v26.4s, v12.4s, v5.s[1] \n"// sum5 += (a02-a72) * k52 "fmla v27.4s, v13.4s, v5.s[1] \n"// "fmla v28.4s, v12.4s, v5.s[2] \n"// sum6 += (a02-a72) * k62 "fmla v29.4s, v13.4s, v5.s[2] \n"// "fmla v30.4s, v12.4s, v5.s[3] \n"// sum7 += (a02-a72) * k72 "fmla v31.4s, v13.4s, v5.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v6.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v6.s[0] \n"// "fmla v18.4s, v14.4s, v6.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v6.s[1] \n"// "fmla v20.4s, v14.4s, v6.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v6.s[2] \n"// "fmla v22.4s, v14.4s, v6.s[3] \n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v6.s[3] \n"// "fmla v24.4s, v14.4s, v7.s[0] \n"// sum4 += (a03-a73) * k43 "fmla v25.4s, v15.4s, v7.s[0] \n"// "fmla v26.4s, v14.4s, v7.s[1] \n"// sum5 += (a03-a73) 
* k53 "fmla v27.4s, v15.4s, v7.s[1] \n"// "fmla v28.4s, v14.4s, v7.s[2] \n"// sum6 += (a03-a73) * k63 "fmla v29.4s, v15.4s, v7.s[2] \n"// "fmla v30.4s, v14.4s, v7.s[3] \n"// sum7 += (a03-a73) * k73 "fmla v31.4s, v15.4s, v7.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" "st1 {v24.4s, v25.4s}, [%4] \n" "st1 {v26.4s, v27.4s}, [%5] \n" "st1 {v28.4s, v29.4s}, [%6] \n" "st1 {v30.4s, v31.4s}, [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] * vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; 
sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v14.4s, v15.4s}, [%21] \n" // sum0_7 inital with bias "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // k "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" // d // k0 "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k70) * a00 "fmla v17.4s, v1.4s, v8.s[0] \n"// "fmla 
v18.4s, v2.4s, v8.s[1] \n"// sum1 += (k01-k71) * a10 "fmla v19.4s, v3.4s, v8.s[1] \n"// "fmla v20.4s, v4.4s, v8.s[2] \n"// sum2 += (k02-k72) * a20 "fmla v21.4s, v5.4s, v8.s[2] \n"// "fmla v22.4s, v6.4s, v8.s[3] \n"// sum3 += (k03-k73) * a30 "fmla v23.4s, v7.4s, v8.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v14.4s, v14.4s, v16.4s \n" "fadd v15.4s, v15.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k70) * a00 "fmla v15.4s, v8.4s, v1.4s \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" "st1 {v15.s}[0], [%4] \n" "st1 {v15.s}[1], [%5] \n" "st1 {v15.s}[2], [%6] \n" "st1 {v15.s}[3], [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * 
vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __ARM_NEON output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%13] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // kernel "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, 
v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v1.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v1.s[0] \n"// "fmla v18.4s, v10.4s, v1.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v1.s[1] \n"// "fmla v20.4s, v10.4s, v1.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v1.s[2] \n"// "fmla v22.4s, v10.4s, v1.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v1.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v2.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v2.s[0] \n"// "fmla v18.4s, v12.4s, v2.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v2.s[1] \n"// "fmla v20.4s, v12.4s, v2.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v2.s[2] \n"// "fmla v22.4s, v12.4s, v2.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v2.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v3.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v3.s[0] \n"// "fmla v18.4s, v14.4s, v3.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v3.s[1] \n"// "fmla v20.4s, v14.4s, v3.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v3.s[2] \n"// "fmla v22.4s, v14.4s, v3.s[3] \n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v3.s[3] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, 
v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%13] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 = (a00-a07) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 = (a00-a07) * k30 "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n"// sum0 += (a10-a17) * k01 "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n"// sum1 += (a10-a17) * k11 "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n"// sum2 += (a10-a17) * k21 "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n"// sum3 += (a10-a17) * k31 "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d4[0] \n"// sum0 += (a20-a27) * k02 "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n"// sum1 += (a20-a27) * k12 "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n"// sum2 += (a20-a27) * k22 "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n"// sum3 += (a20-a27) * k32 "vmla.f32 q15, q5, d5[1] \n" "vmla.f32 q8, q6, d6[0] \n"// sum0 += 
(a30-a37) * k03 "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n"// sum1 += (a30-a37) * k13 "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n"// sum2 += (a30-a37) * k23 "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n"// sum3 += (a30-a37) * k33 "vmla.f32 q15, q7, d7[1] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4]! \n" "vmla.f32 q8, q4, d0[0] \n"// sum0 += (a00-a70) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 += (a00-a70) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 += (a00-a70) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 += (a00-a70) * k30 "vmla.f32 q15, q5, d1[1] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d16-d19}, [%0] \n" "vst1.f32 {d20-d23}, [%1] \n" "vst1.f32 {d24-d27}, [%2] \n" "vst1.f32 {d28-d31}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] 
* vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v14.4s}, [%13] \n" // sum0_3 inital with bias "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // k "prfm pldl1keep, [%4, #128] \n" "ld1 {v8.4s}, [%4], #16 \n" // d "subs w4, w4, #1 \n" "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k30) * a00 "fmla v17.4s, v1.4s, v8.s[1] \n"// sum1 += (k01-k31) * a10 "fmla v18.4s, v2.4s, v8.s[2] \n"// sum2 += (k02-k32) * a20 
"fmla v19.4s, v3.4s, v8.s[3] \n"// sum3 += (k03-k33) * a30 "bne 0b \n" "add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v14.4s, v14.4s, v16.4s \n" "add v14.4s, v14.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v8.4s}, [%4], #4 \n" "subs w4, w4, #1 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k30) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "vld1.f32 {d24-d25}, [%13] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n"// for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n"// data "vmla.f32 q8, q0, d8[0] \n"// (k00-k30) * a00 "vmla.f32 q9, q1, d8[1] \n"// (k01-k31) * a01 "vmla.f32 q10, q2, d9[0] \n"// (k02-k32) * a02 "vmla.f32 q11, q3, d9[1] \n"// (k03-k33) * a03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q12, q0, q4 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d24[0]}, [%0] \n" "vst1.f32 {d24[1]}, [%1] \n" "vst1.f32 {d25[0]}, [%2] \n" "vst1.f32 {d25[1]}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __ARM_NEON output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v16.4s, %w7 \n" // sum0 "dup v17.4s, %w7 \n" // sum0n "lsr w4, %w6, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// // k1 "fmla v16.4s, v10.4s, v0.s[1] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v0.s[1] \n"// // k2 "fmla v16.4s, v12.4s, v0.s[2] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v0.s[2] \n"// // k3 "fmla v16.4s, v14.4s, v0.s[3] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v0.4s, v8.4s \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v0.4s, v9.4s \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( "vdup.f32 q8, %7 \n" "vdup.f32 q9, %7 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" "pld [%2, #128] \n" "vld1.f32 
{d0-d1}, [%2]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __ARM_NEON output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ int k=0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; k+3<L; k+=4) { float32x4_t _p0 = vld1q_f32(vb); vb += 4; float32x4_t _k0 = vld1q_f32(va); va += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = 
vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } }
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack convolution weights into the interleaved layout consumed by the
// im2col + sgemm convolution routine below.
//
// _kernel     : flat weight blob laid out as [outch][inch][kernel_size]
// kernel_tm   : destination packed-weight Mat (allocated here via create())
// inch        : number of input channels
// outch       : number of output channels
// kernel_size : kernel_w * kernel_h
//
// Output channels are grouped so the sgemm inner loops can load one group's
// weights contiguously: on aarch64 NEON groups of 8, then 4, then singles;
// otherwise groups of 4, then singles.  Within a group, weight q of every
// channel is stored adjacently (interleaved), as the stores below show.
static void conv_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8
    // channel count = #8-groups + #4-groups + leftover single channels
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);
#endif

    int nn_outch = 0;
    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    // Pack 8 output channels per packed channel: ktmp receives
    // k0[q]..k7[q] interleaved for each weight index q.
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;
        const float* k4 = kernel + (p+4)*inch*kernel_size;
        const float* k5 = kernel + (p+5)*inch*kernel_size;
        const float* k6 = kernel + (p+6)*inch*kernel_size;
        const float* k7 = kernel + (p+7)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
#endif

    // Pack remaining output channels 4 at a time (on non-aarch64 this is the
    // primary packing path).
    nn_outch = (outch - remain_outch_start) >> 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        // skip past the 8-groups already packed above
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        float* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // Leftover single output channels: copied through unpacked, one per
    // packed channel.
    remain_outch_start += nn_outch << 2;

    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        float* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + packed-sgemm convolution: expands bottom_blob into an im2col
// matrix, then multiplies it against the weights prepared by
// conv_im2col_sgemm_transform_kernel_neon, writing into top_blob.
// bias is NULL-checked before use further down, so _bias may be empty.
static void conv_im2col_sgemm_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* bias = _bias;

    // im2col
    // One row per (input channel, kernel tap) pair, one column per output
    // position.  Indices go straight into bottom_blob with no bounds checks,
    // so presumably the caller has already applied any padding — TODO confirm.
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            // gather the input pixel hit by kernel tap (u,v) for every
            // output position (i,j)
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
} } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "cc", "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0] \n" "vst1.f32 {d0-d3}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __ARM_NEON tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* 
output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v0.4s, v1.4s}, [%21] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n"// sum4 "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n"// sum5 "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n"// sum6 "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n"// sum7 "dup v31.4s, v1.s[3] \n" "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // kernel "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla 
v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v2.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v2.s[0] \n"// "fmla v18.4s, v10.4s, v2.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v2.s[1] \n"// "fmla v20.4s, v10.4s, v2.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v2.s[2] \n"// "fmla v22.4s, v10.4s, v2.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v2.s[3] \n"// "fmla v24.4s, v10.4s, v3.s[0] \n"// sum4 += (a01-a71) * k41 "fmla v25.4s, v11.4s, v3.s[0] \n"// "fmla v26.4s, v10.4s, v3.s[1] \n"// sum5 += (a01-a71) * k51 "fmla v27.4s, v11.4s, v3.s[1] \n"// "fmla v28.4s, v10.4s, v3.s[2] \n"// sum6 += (a01-a71) * k61 "fmla v29.4s, v11.4s, v3.s[2] \n"// "fmla v30.4s, v10.4s, v3.s[3] \n"// sum7 += (a01-a71) * k71 "fmla v31.4s, v11.4s, v3.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v4.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v4.s[0] \n"// "fmla v18.4s, v12.4s, v4.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v4.s[1] \n"// "fmla v20.4s, v12.4s, v4.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v4.s[2] \n"// "fmla v22.4s, v12.4s, v4.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v4.s[3] \n"// "fmla v24.4s, v12.4s, v5.s[0] \n"// sum4 += (a02-a72) * k42 "fmla v25.4s, v13.4s, v5.s[0] \n"// "fmla v26.4s, v12.4s, v5.s[1] \n"// sum5 += (a02-a72) * k52 "fmla v27.4s, v13.4s, v5.s[1] \n"// "fmla v28.4s, v12.4s, v5.s[2] \n"// sum6 += (a02-a72) * k62 "fmla v29.4s, v13.4s, v5.s[2] \n"// "fmla v30.4s, v12.4s, v5.s[3] \n"// sum7 += (a02-a72) * k72 "fmla v31.4s, v13.4s, v5.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v6.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v6.s[0] \n"// "fmla v18.4s, v14.4s, v6.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v6.s[1] \n"// "fmla v20.4s, v14.4s, v6.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v6.s[2] \n"// "fmla v22.4s, v14.4s, v6.s[3] 
\n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v6.s[3] \n"// "fmla v24.4s, v14.4s, v7.s[0] \n"// sum4 += (a03-a73) * k43 "fmla v25.4s, v15.4s, v7.s[0] \n"// "fmla v26.4s, v14.4s, v7.s[1] \n"// sum5 += (a03-a73) * k53 "fmla v27.4s, v15.4s, v7.s[1] \n"// "fmla v28.4s, v14.4s, v7.s[2] \n"// sum6 += (a03-a73) * k63 "fmla v29.4s, v15.4s, v7.s[2] \n"// "fmla v30.4s, v14.4s, v7.s[3] \n"// sum7 += (a03-a73) * k73 "fmla v31.4s, v15.4s, v7.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" "st1 {v24.4s, v25.4s}, [%4] \n" "st1 {v26.4s, v27.4s}, [%5] \n" "st1 {v28.4s, v29.4s}, [%6] \n" "st1 {v30.4s, v31.4s}, [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), 
"1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] * vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; 
sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v14.4s, v15.4s}, [%21] \n" // sum0_7 inital with bias "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // k "ld1 
{v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" // d // k0 "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k70) * a00 "fmla v17.4s, v1.4s, v8.s[0] \n"// "fmla v18.4s, v2.4s, v8.s[1] \n"// sum1 += (k01-k71) * a10 "fmla v19.4s, v3.4s, v8.s[1] \n"// "fmla v20.4s, v4.4s, v8.s[2] \n"// sum2 += (k02-k72) * a20 "fmla v21.4s, v5.4s, v8.s[2] \n"// "fmla v22.4s, v6.4s, v8.s[3] \n"// sum3 += (k03-k73) * a30 "fmla v23.4s, v7.4s, v8.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v14.4s, v14.4s, v16.4s \n" "fadd v15.4s, v15.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k70) * a00 "fmla v15.4s, v8.4s, v1.4s \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" "st1 {v15.s}[0], [%4] \n" "st1 {v15.s}[1], [%5] \n" "st1 {v15.s}[2], [%6] \n" "st1 {v15.s}[3], [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; 
float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __ARM_NEON output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%13] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // kernel "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v1.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v1.s[0] \n"// "fmla v18.4s, v10.4s, v1.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v1.s[1] \n"// "fmla v20.4s, v10.4s, v1.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v1.s[2] \n"// "fmla v22.4s, v10.4s, v1.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v1.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v2.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v2.s[0] \n"// "fmla v18.4s, v12.4s, v2.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v2.s[1] \n"// "fmla v20.4s, v12.4s, v2.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v2.s[2] \n"// "fmla v22.4s, v12.4s, 
v2.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v2.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v3.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v3.s[0] \n"// "fmla v18.4s, v14.4s, v3.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v3.s[1] \n"// "fmla v20.4s, v14.4s, v3.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v3.s[2] \n"// "fmla v22.4s, v14.4s, v3.s[3] \n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v3.s[3] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%13] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// 
for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 = (a00-a07) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 = (a00-a07) * k30 "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n"// sum0 += (a10-a17) * k01 "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n"// sum1 += (a10-a17) * k11 "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n"// sum2 += (a10-a17) * k21 "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n"// sum3 += (a10-a17) * k31 "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d4[0] \n"// sum0 += (a20-a27) * k02 "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n"// sum1 += (a20-a27) * k12 "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n"// sum2 += (a20-a27) * k22 "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n"// sum3 += (a20-a27) * k32 "vmla.f32 q15, q5, d5[1] \n" "vmla.f32 q8, q6, d6[0] \n"// sum0 += (a30-a37) * k03 "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n"// sum1 += (a30-a37) * k13 "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n"// sum2 += (a30-a37) * k23 "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n"// sum3 += (a30-a37) * k33 "vmla.f32 q15, q7, d7[1] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4]! 
\n" "vmla.f32 q8, q4, d0[0] \n"// sum0 += (a00-a70) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 += (a00-a70) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 += (a00-a70) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 += (a00-a70) * k30 "vmla.f32 q15, q5, d1[1] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d16-d19}, [%0] \n" "vst1.f32 {d20-d23}, [%1] \n" "vst1.f32 {d24-d27}, [%2] \n" "vst1.f32 {d28-d31}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; 
sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v14.4s}, [%13] \n" // sum0_3 inital with bias "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // k "prfm pldl1keep, [%4, #128] \n" "ld1 {v8.4s}, [%4], #16 \n" // d "subs w4, w4, #1 \n" "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k30) * a00 "fmla v17.4s, v1.4s, v8.s[1] \n"// sum1 += (k01-k31) * a10 "fmla v18.4s, v2.4s, v8.s[2] \n"// sum2 += (k02-k32) * a20 "fmla v19.4s, v3.4s, v8.s[3] \n"// sum3 += (k03-k33) * a30 "bne 0b \n" "add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v14.4s, v14.4s, v16.4s \n" "add v14.4s, v14.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v8.4s}, [%4], #4 \n" "subs w4, w4, #1 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k30) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], 
[%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "vld1.f32 {d24-d25}, [%13] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n"// for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n"// data "vmla.f32 q8, q0, d8[0] \n"// (k00-k30) * a00 "vmla.f32 q9, q1, d8[1] \n"// (k01-k31) * a01 "vmla.f32 q10, q2, d9[0] \n"// (k02-k32) * a02 "vmla.f32 q11, q3, d9[1] \n"// (k03-k33) * a03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q12, q0, q4 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d24[0]}, [%0] \n" "vst1.f32 {d24[1]}, [%1] \n" "vst1.f32 {d25[0]}, [%2] \n" "vst1.f32 {d25[1]}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __ARM_NEON output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v16.4s, %w7 \n" // sum0 "dup v17.4s, %w7 \n" // sum0n "lsr w4, %w6, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// // k1 "fmla v16.4s, v10.4s, v0.s[1] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v0.s[1] \n"// // k2 "fmla v16.4s, v12.4s, v0.s[2] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v0.s[2] \n"// // k3 "fmla v16.4s, v14.4s, v0.s[3] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v0.4s, v8.4s \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v0.4s, v9.4s \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( "vdup.f32 q8, %7 \n" "vdup.f32 q9, %7 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" "pld [%2, #128] \n" "vld1.f32 
{d0-d1}, [%2]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __ARM_NEON output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ int k=0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; k+3<L; k+=4) { float32x4_t _p0 = vld1q_f32(vb); vb += 4; float32x4_t _k0 = vld1q_f32(va); va += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = 
vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } }
GB_binop__div_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__div_fc32
// A.*B function (eWiseMult):       GB_AemultB__div_fc32
// A*D function (colscale):         GB_AxD__div_fc32
// D*A function (rowscale):         GB_DxB__div_fc32
// C+=B function (dense accum):     GB_Cdense_accumB__div_fc32
// C+=b function (dense accum):     GB_Cdense_accumb__div_fc32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__div_fc32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__div_fc32
// C=scalar+B                       GB_bind1st__div_fc32
// C=scalar+B'                      GB_bind1st_tran__div_fc32
// C=A+scalar                       GB_bind2nd__div_fc32
// C=A'+scalar                      GB_bind2nd_tran__div_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (aij, bij)

// type of matrix A entries
#define GB_ATYPE \
    GxB_FC32_t

// type of matrix B entries
#define GB_BTYPE \
    GxB_FC32_t

// type of matrix C entries
#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: complex single-precision division
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC32_div (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: no CBLAS axpy applies to the DIV operator
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__div_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces for M, A, and B (GB_ek_slice_free accepts
// NULL pointers, so this is safe even if a slice was never allocated)
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__div_fc32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__div_fc32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__div_fc32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present according to the bitmap Bb (GBB test)
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_FC32_div (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__div_fc32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present according to the bitmap Ab (GBB test)
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_FC32_div (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_div (x, aij) ;            \
}

GrB_Info GB_bind1st_tran__div_fc32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its file-level definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_div (aij, y) ;            \
}

GrB_Info GB_bind2nd_tran__div_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__div_fc32
// A.*B function (eWiseMult):       GB_AemultB__div_fc32
// A*D function (colscale):         GB_AxD__div_fc32
// D*A function (rowscale):         GB_DxB__div_fc32
// C+=B function (dense accum):     GB_Cdense_accumB__div_fc32
// C+=b function (dense accum):     GB_Cdense_accumb__div_fc32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__div_fc32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__div_fc32
// C=scalar+B                       GB_bind1st__div_fc32
// C=scalar+B'                      GB_bind1st_tran__div_fc32
// C=A+scalar                       GB_bind2nd__div_fc32
// C=A'+scalar                      GB_bind2nd_tran__div_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (aij, bij)

// type of matrix A entries
#define GB_ATYPE \
    GxB_FC32_t

// type of matrix B entries
#define GB_BTYPE \
    GxB_FC32_t

// type of matrix C entries
#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: complex single-precision division
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC32_div (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: no CBLAS axpy applies to the DIV operator
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_fc32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_fc32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_fc32 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_fc32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__div_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
*kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__div_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__div_fc32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = Bx [p] ; Cx [p] = GB_FC32_div (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__div_fc32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = GB_FC32_div (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_div (x, aij) ; \ } GrB_Info GB_bind1st_tran__div_fc32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_div (aij, y) ; \ } GrB_Info GB_bind2nd_tran__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_fc32 // A.*B function (eWiseMult): GB_AemultB__div_fc32 // A*D function (colscale): GB_AxD__div_fc32 // D*A function (rowscale): GB_DxB__div_fc32 // C+=B function (dense accum): GB_Cdense_accumB__div_fc32 // C+=b function (dense accum): GB_Cdense_accumb__div_fc32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_fc32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_fc32 // C=scalar+B GB_bind1st__div_fc32 // C=scalar+B' GB_bind1st_tran__div_fc32 // C=A+scalar GB_bind2nd__div_fc32 // C=A'+scalar GB_bind2nd_tran__div_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_div (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC32_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC32_div (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_fc32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_fc32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_fc32 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_fc32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__div_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
*kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__div_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__div_fc32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = Bx [p] ; Cx [p] = GB_FC32_div (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a 
binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__div_fc32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = GB_FC32_div (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_div (x, aij) ; \ } GrB_Info GB_bind1st_tran__div_fc32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_div (aij, y) ; \ } GrB_Info GB_bind2nd_tran__div_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ExSHalosLC4.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <fftw3.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include "fftlog.h" #define check_memory(p, name) if(p == NULL){printf("Problems to alloc %s.\n", name); return 0;} #define Lc_MAX 1.0e+2 #define Mc_MIN 1.0e+5 #define M_max 6.0e+15 /*Structure for the peaks in the density field*/ typedef struct Halos_centers { int x[3]; /*Index of the halo center*/ float den; /*Density of the halo's central cell*/ } PEAKS; /*Structure for the final halos*/ typedef struct Halos { int x[3]; /*Index of central cell of teh halo*/ int cont; /*Number of cells in the halo*/ } HALOS; /*Barrier used for the halo definition*/ float Barrier(float S, float dc, char barrier, float a, float b, float alpha){ float resp; /*The Press-Schechter barrier*/ if(barrier == 0) resp = dc; /*The Sheth-Tormen barrier*/ else if(barrier == 1) resp = sqrt(a)*dc*(1.0 + b*pow(S/(a*dc*dc), alpha)); return resp; } /*Partition function for the quicksort*/ long int partition_peaks( PEAKS a[], long l, long r) { long i, j, k; PEAKS pivot, t; pivot.den = a[l].den; for(k=0;k<3;k++) pivot.x[k] = a[l].x[k]; i = l; j = r+1; while( 1){ do ++i; while( a[i].den >= pivot.den && i < r ); do --j; while( a[j].den < pivot.den ); if( i >= j ) break; t.den = a[i].den; a[i].den = a[j].den; a[j].den = t.den; for(k=0;k<3;k++){ t.x[k] = a[i].x[k]; a[i].x[k] = a[j].x[k]; a[j].x[k] = t.x[k];} } t.den = a[l].den; a[l].den = a[j].den; a[j].den= t.den; for(k=0;k<3;k++){ t.x[k] = a[l].x[k]; a[l].x[k] = a[j].x[k]; a[j].x[k] = t.x[k];} return j; } /*The quicksort algorithm to sort the peaks list*/ void quickSort_peaks( PEAKS a[], long l, long r){ long j; if( l < r ){ // divide and conquer j = partition_peaks( a, l, r); quickSort_peaks( a, l, j-1); quickSort_peaks( a, j+1, r); } } /*Define the distance between two cells*/ long int dist2(long int i, long int j, long int k){ long int resp; resp = 
i*i + j*j + k*k; return resp; } /*Define the cyclic sum for floats*/ float cysumf(float x, float y, float L){ float resp; resp = x + y; if(resp>=L) resp -= L; if(resp<0) resp += L; return resp; } /*Define the cyclic sum*/ int cysum(int i, int j, int nd){ int resp; resp = i+j; if(resp>=nd) resp -= nd; if(resp<0) resp += nd; return resp; } /*Window function in the Fourier space*/ double W(double k, double R){ double resp; resp = 3.0/(pow(k*R,2))*(sin(k*R)/(k*R) - cos(k*R)); return resp; } /*Evaluate the square root of matter variance*/ double calc_sigma(double *k, double *P, int Nk, double R){ int i; double resp; resp = 0.0; for(i=0;i<Nk-1;i++) resp += (k[i+1] - k[i])/2.0*(P[i]*pow(k[i]*W(k[i],R), 2) + P[i+1]*pow(k[i+1]*W(k[i+1],R), 2)); return resp/(2.0*M_PI*M_PI); } /*Evaluate the mass function for a given sigma*/ double fh(double sigma, int model, double dc){ double resp, nu; double B, d, e, f, g; //Press-Schechter if(model == 0){ nu = dc/sigma; resp = sqrt(2.0/M_PI)*nu*exp(-nu*nu/2.0); } //Tinker Delta = 300 else if(model == 1){ B = 0.466; d = 2.06; e = 0.99; f = 0.48; g = 1.310; resp = B*(pow(sigma/e, -d) + pow(sigma, -f))*exp(-g/(sigma*sigma)); } return resp; } /*Find the index of the next sphere*/ int Next_Count(int *spheres, int Ncells, int count){ int i, resp; for(i=0;i<Ncells;i++) if(spheres[i] == count){ resp = i + 1; break; } return resp; } /*Halo concentration*/ float f_c(float Mv, float Mstar, float z){ float resp; resp = 9.0/(1.0 + z)*pow(Mv/Mstar, -0.13); return resp; } /*Generate a random number from 0 to Rv following the NFW profile*/ float Generate_NFW(float rv, float c, float A, int seed){ float Int, rs, r, rtmp; gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc (gsl_rng_taus); gsl_rng_set(rng_ptr, seed); Int = gsl_rng_uniform(rng_ptr); rs = rv/c; r = gsl_rng_uniform(rng_ptr); rtmp = r + 1.0; while(fabs(r-rtmp) > 0.001){ rtmp = r; r = r - ((log(1.0 + r*c) - r*c/(1.0 + r*c) - A*Int)*pow(1.0 + r*c, 2))/(c*(2.0*r*c + r*r*c*c)); } gsl_rng_free(rng_ptr); 
return r*rv; } /*Mean value of central galaxies*/ float Ncentral(float M, float logMmin, float siglogM){ float resp; resp = 0.5*(1.0 + erf((log10(M) - logMmin)/siglogM)); return resp; } /*Mean value of satellite galaxies*/ float Nsatellite(float M, float logM0, float logM1, float alpha){ float resp; resp = pow((M - pow(10.0, logM0))/pow(10.0, logM1), alpha); return resp; } int main(int argc,char *argv[]) { FILE *power, *den_grid, *halo_cat, *disp_cat, *light_cat, *collapse_cat; char powerfile[100], denfile[100], halofile[100], dispfile[100], lightfile[100], collapsefile[100]; char DO_2LPT, BARRIER, out_inter, out_halos, out_collapse, DEN_GRID, DISP_CAT, DO_HOD; int i, j, k, nx, ny, nz, nz2, Nmin, N_cores, Nk, Nr, Ncells, seed, cont_tmp, grows, grows_tmp, tmp, nmin, m, m_tmp, nsnap; long np, cont, nh, l; float Lc, Om0, redshift, Growth, dc, EB_a, EB_b, EB_alpha, rhoc, Hz, Omz, rhomz, Mtot, Mcell, Lx, Ly, Lz, klx, kly, klz, Normx, Normk, Dv, kx, ky, kz, kmod, *Sig_grid, sigtmp, std, den, den_tmp, factx, facty, factz, fact, phixx, phixy, phixz, phiyy, phiyz, phizz, Pobs[3], LoS[3], dist_min, dist_max, theta_min, cos_min; double *K, *P, *R, *M, *Sig, Rmin, Rmax, *R_xi, *Xi; fftwf_plan p1, p2; fftwf_complex *deltak, *deltak_tmp; float *delta; int *flag, *sphere; PEAKS *peaks, *tmpp; HALOS *halos; if (argc != 35){ printf("\nWrong number of arguments.\n"); printf("arg1: Name of the power spectrum file.\n"); printf("arg2: Size (in Mpc/h) or mass (in M_odot/h) of each cell.\n"); printf("arg3-5: Number of cells along each direction.\n"); printf("arg6: Some seed for the random number generator.\n"); printf("arg7: Use the 2LPT to move the halos? Yes (1) or No (0).\n"); printf("arg8: The Value of Omega_m today.\n"); printf("arg9: The readshift z.\n"); printf("arg10: The rate between the growth function at the final resdshit and at the redshift of the input power spectrum.\n"); printf("arg11: The value of critical density delta _{c}. 
Put 0 to use the fit.\n"); printf("arg12: The minimum number of partiles in a halo of the final catalogue.\n"); printf("arg13: The number of cores to use in the parallel parts.\n"); printf("arg14: Prefix for the outputs.\n"); printf("arg15: Which barrier would you like to use to find the halos?\n\tThe statical barrier (SB) (0);\n\tThe ellipsoidal barrier (EB) (1).\n"); printf("arg16: Which intermediate results would you like to save?:\n\tNo one (0);\n\tThe gaussian density grid (1);\n\tThe particles displaced with LPT (2);\n\tBoth (3).\n"); printf("arg17: How do you want the final halo catalogue?\n\tNo halo catalogue (0);\n\tThe positions and velocities in the real space (1);\n\tThe positions and velocities in the real space light cone (2);\n\tThe positions and velocities in redshift space light cone(3).\n"); printf("arg18-20: The three parameters for the ellipsoidal barrier: a, b and alpha.\n"); printf("arg21: Read the density grid (0) or compute it (1)?\n"); printf("arg22: Read the displacement field (0) or compute it (1)?\n"); printf("arg23-25: Position of the observer in units of the box size.\n"); printf("arg26-28: Direction of the line of sight.\n"); printf("arg29-30: Minimum and maximum comoving distance of the halos in this snapshot in the light cone.\n"); printf("arg31: Angular aperture of the light cone in units of pi.\n"); printf("arg32: Save the information about the collapsed particles in the light cone? 
Yes (1) or No (0).\n"); printf("arg33: Populate the halos with a HOD?\n\tNo (0);\n\tYes, with a single type of galaxy (1)\n\tYes, with multiple types of galaxies(2).\n"); printf("arg34: Number of this snapshot.\n"); exit(0); } /*Get the name of all files*/ sprintf(powerfile, "%s", argv[1]); sprintf(denfile, "%s_den.dat", argv[14]); sprintf(halofile, "%s_halos.dat", argv[14]); sprintf(dispfile, "%s_disp.dat", argv[14]); /*Parameters with specifications of the box and options for this simulation*/ Lc = atof(argv[2]); //Size or mass of each cell nx = atoi(argv[3]); //Number of cells along the x-direction ny = atoi(argv[4]); //Number of cells along the y-direction nz = atoi(argv[5]); //Number of cells along the z-direction seed = atoi(argv[6]); //Seed for the random generator (same seed gives the same final catalogue) DO_2LPT = (char)atoi(argv[7]); //Parameter with the information about the use (or not) of second order lagrangian perturbation theory Nmin = atoi(argv[12]); //Number of particles in the smaller final halo N_cores = atoi(argv[13]); //Number of cores used by openmp in the parallel parts BARRIER = (char)atoi(argv[15]); //Parameter with the information about the utilization (or not) of the EB out_inter = (char)atoi(argv[16]); //Parameter with the information about which intermediate results must be output out_halos = (char)atoi(argv[17]); //Parameter with the information about what to save in the final halo catalogue out_collapse = (char)atoi(argv[32]); //Parameter with the information about the collapsed particles in the light cone DEN_GRID = (char)atoi(argv[21]); //Compute a new density field (1) or just read it from a file (0)? DISP_CAT = (char)atoi(argv[22]); //Compute the displacement field (1) or just read it from a file (0)? DO_HOD = (char)atoi(argv[33]); //Populate the halos with no galaxies (0), one type of galaxy (1) or multiple types (2)? 
/*Some physical parametrs used in this simulation*/ Om0 = atof(argv[8]); //Omega_m value today (z=0) redshift = atof(argv[9]); //Redshift of the final catalogues Growth = atof(argv[10]); //Ratio between the growth function at the final redshift and the redshift of the inpur power spectrum dc = atof(argv[11]); //Value of the critical density for the halo formation linearly extrapoleted using linear theory to the redshift of the final catalogues /*Parameters for the EB*/ EB_a = atof(argv[18]); //Parameter a of the EB EB_b = atof(argv[19]); //Parameter b of the EB EB_alpha = atof(argv[20]); //Parameter alpha of the EB /*Parameters for the construction of the light cone*/ Pobs[0] = atof(argv[23]); //Position x of the observer in units of the box size Pobs[1] = atof(argv[24]); //Position y of the observer in units of the box size Pobs[2] = atof(argv[25]); //Position z of the observer in units of the box size LoS[0] = atof(argv[26]); //Component x of the direction of the line of sight LoS[1] = atof(argv[27]); //Component y of the direction of the line of sight LoS[2] = atof(argv[28]); //Component z of the direction of the line of sight /*Normalize the LoS vector*/ kmod = 0.0; for(i=0;i<3;i++) kmod += LoS[i]*LoS[i]; for(i=0;i<3;i++) LoS[i] = LoS[i]/sqrt(kmod); dist_min = atof(argv[29]); //Minimum comoving distance of this slice dist_max = atof(argv[30]); //Maximum comoving distance of this slice theta_min = atof(argv[31])*M_PI; //Minimum angle theta cos_min = cos(theta_min); //Cossine of the minimum angle theta nsnap = atoi(argv[34]); //Number of this snapshot sprintf(lightfile, "%s_%d_LightCone.dat", argv[14], nsnap); sprintf(collapsefile, "%s_%d_Collapse.dat", argv[14], nsnap); /*Some derived parameters used in this simulation*/ rhoc = 2.775e+11; //Critical density in unitis of M_odot/Mpc*h^2 Hz = 100.0*sqrt(Om0*pow(1.0 + redshift, 3.0) + (1.0 - Om0)); //Hubble constant at the final redshift Omz = Om0*pow(1.0 + redshift, 3.0)/(Om0*pow(1.0 + redshift, 3.0) + (1.0 - 
Om0));//Matter contrast density at the final redshift rhomz = Om0*rhoc; //Matter density at the final redshift Dv = (18*M_PI*M_PI + 82.0*(Omz - 1.0) - 39.0*pow(Omz - 1.0, 2.0))/Omz; //Overdensity used to put galaxies in the halos if(Lc < Lc_MAX) //If the size of each cell was given compute the mass of each cell Mcell = rhomz*pow(Lc, 3.0); else if(Lc > Mc_MIN){ //If the mass of each cell was given compute the size of each cell Mcell = Lc; Lc = pow(Mcell/rhomz, 1.0/3.0); } else{ //Notify an unexpected behavior and exit printf("A cell larger than %f [Mpc/h] or with a mass smaller than %e [M_odot/h] is not expected. Please, change this value or change the definition of Lc_MAX and Mc_MIN in the code.\n", Lc_MAX, Mc_MIN); exit(0); } Lx = Lc*nx; //Compute the size of the box along the x-direction Ly = Lc*ny; //Compute the size of the box along the y-direction Lz = Lc*nz; //Compute the size of the box along the z-direction Mtot = rhomz*Lx*Ly*Lz; //Compute the total mass in the box klx = 2.0*M_PI/Lx; //Compute the fundamental frequency in the x-direction kly = 2.0*M_PI/Ly; //Compute the fundamental frequency in the y-direction klz = 2.0*M_PI/Lz; //Compute the fundamental frequency in the z-direction Normx = 1.0/sqrt(Lx*Ly*Lz); //Compute the normalization needed when aplyed the FFTW3 from k to x space Normk = sqrt(Lx*Ly*Lz)/(nx*ny*nz); //Compute the normalization needed when aplyed the FFTW3 from x to k space nz2 = nz/2 + 1; //Quantity used to alloc the complex arrays used in the FFTW3 nmin = nx; //Determine the smaller direction if(nmin > ny) nmin = ny; if(nmin > nz) nmin = nz; /*Compute the number of repetitions of this box to construct the light cone*/ float Pos[3], dist, cost, vr, Mass; int Nrep_x, Nrep_y, Nrep_z; if(out_halos == 2 || out_halos == 3){ Nrep_x = floor(dist_max/Lx) + 1; Nrep_y = floor(dist_max/Ly) + 1; Nrep_z = floor(dist_max/Lz) + 1; } /*Parameters of the HOD model*/ int Ngals, Ncen, Nsat; float r, phi, theta, Rv, C, A; float logMmin, siglogM, logM0, 
logM1, alpha; logMmin = 12.44005264; siglogM = 0.79560376; logM0 = 11.98154109; logM1 = 12.99600074; alpha = 1.13717828; /*Check some inputs before to start*/ if(out_inter == 0 && out_halos == 0){ printf("You need to choose something to output! arg16, arg17 and/or arg18 must be >0!\n"); exit(0); } if(nx<0 || ny<0 || nz<0){ printf("You are trying to use n = (%d, %d, %d) and it is not possible!\n", nx, ny, nz); exit(0); } if(DO_2LPT < 0 || DO_2LPT >1){ printf("You are trying to use DO_2LPT = %d and it is not possible! Setting DO_2LPT = 0.\n", DO_2LPT); DO_2LPT = 0; } if(Growth <= 0.0){ printf("You gave a value of the ratio between the growths of %f and it is not physical!\n", Growth); exit(0); } if(Nmin < 0){ printf("You gave a negative number for the number of particles in the smaller halo (%d). Settin it in 1.\n", Nmin); Nmin = 1; } if(N_cores < 0){ printf("You gave a negative number for the number of cores (%d). Settin it in 1.\n", N_cores); N_cores = 1; } if(BARRIER != 0 && BARRIER != 1){ printf("You need to chose a valid barrier for the void detection! Your choice were %d.\n", BARRIER); exit(0); } if(Om0>1.0 || Om0<0.0){ printf("Your Omega _{m} = %f! Put some valid value between 0.0 and 1.0.\n", Om0); exit(0); } if(dc < 0.0){ printf("Your delta_{c} = %f < 0. Using the fit.\n", dc); dc = 1.686*pow(Omz, 0.0055); } if(dc == 0.0) dc = 1.686*pow(Omz, 0.0055); if(out_halos > 1 && theta_min > 1.0){ printf("Theta min must be equal or smaller than 1! Setting it to 1.\n"); theta_min = 1.0; cos_min = -1.0; } if(out_halos > 1 && LoS[0] == 0.0 && LoS[1] == 0.0 && LoS[2] == 0.0){ printf("You must give a non vanishing vector for the direction of the line of sight!\n"); exit(0); } if(out_collapse == 1 && out_halos < 2){ printf("It is not possible to save the information about the collapsed particles without the creation of a light cone. 
Ignoring this parameter.\n"); out_collapse = 0; } printf("\nRunning the ExSHalos!\n\ Omega_m = %.3f, z = %.3f, Growth = %.3f, H = %.2f, d_c = %.3f and Delta_virial = %.1f\n\ L = (%.5f, %.5f, %.5f), N_cells = (%d, %d, %d), M_tot = %.5e, M_cell = %.5e and seed = %d.\n", Omz, redshift, Growth, Hz, dc, Dv, Lx, Ly, Lz, nx, ny, nz, Mtot, Mcell, seed); omp_set_num_threads(N_cores); //Set the number of cores used by the openmp /**************************************/ /* Constructing the density grids */ /**************************************/ printf("\nConstructing the density grid in real and fourier space!\n"); /*Opennning the power spectrum file*/ power = fopen(powerfile, "r"); if (power == NULL) { printf("Unable to open %s\n", powerfile); exit(0); } /*Measuring the number of k's*/ Nk = -1; while(!feof(power)){ fscanf(power, "%f %f", &kx, &ky); Nk ++; } rewind(power); /*Reading the power spectrum*/ K = (double *)malloc(Nk*sizeof(double)); check_memory(K, "K") P = (double *)malloc(Nk*sizeof(double)); check_memory(P, "P") for(i=0;i<Nk;i++){ fscanf(power, "%lf %lf", &K[i], &P[i]); P[i] = pow((double)Growth, 2.0)*P[i]; } fclose(power); /*Evaluating the Sigma(R)*/ Nr = Nk; R = (double *)malloc(Nr*sizeof(double)); check_memory(R, "R") M = (double *)malloc(Nr*sizeof(double)); check_memory(M, "M") Sig = (double *)malloc(Nr*sizeof(double)); check_memory(Sig, "Sig") Rmin = (double)pow(Mcell*0.9*3.0/(4.0*M_PI*rhomz), 1.0/3.0); Rmax = (double)pow(M_max*3.0/(4.0*M_PI*rhomz), 1.0/3.0); for(i=0;i<Nr;i++){ R[i] = pow(10, log10(Rmin) + i*(log10(Rmax) - log10(Rmin))/(Nr-1)); M[i] = 4.0/3.0*M_PI*(double)rhomz*pow(R[i], 3); } for(i=0;i<Nr;i++) Sig[i] = sqrt(calc_sigma(K, P, Nk, R[i])); /*Interpolating the Sigma(M)*/ gsl_interp_accel *acc = gsl_interp_accel_alloc(); gsl_spline *spline = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline, M, Sig, Nr); /*Evaluate the integral of the mass function*/ double *Int; Int = (double *)malloc(Nr*sizeof(double)); check_memory(Int, "Int") 
Int[0] = 0.0; for(i=1;i<Nr;i++) Int[i] = Int[i-1] - (log(Sig[i]) - log(Sig[i-1]))/2.0*(fh(Sig[i], 1, (double) dc)/pow(R[i], -3.0) + fh(Sig[i-1], 1, (double) dc)/pow(R[i-1], -3.0)); /*Interpolate the integral of the mass function as function of mass and its inverse*/ gsl_interp_accel *acc_I = gsl_interp_accel_alloc(); gsl_interp_accel *acc_InvI = gsl_interp_accel_alloc(); gsl_spline *spline_I = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline *spline_InvI = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline_I, M, Int, Nr); gsl_spline_init(spline_InvI, Int, M, Nr); free(Int); /*Compute the Sigma as function of the number of cells in the halo*/ Ncells = floor(M_max/Mcell); Sig_grid = (float *)malloc(Ncells*sizeof(float)); check_memory(Sig_grid, "Sig_grid") Sig_grid[0] = 1e+30; for(i=1;i<Ncells;i++) Sig_grid[i] = pow(gsl_spline_eval(spline, i*Mcell, acc), 2.0); gsl_spline_free(spline); gsl_interp_accel_free(acc); free(R); free(M); free(Sig); /*Read the density grid*/ if(DEN_GRID == 0){ delta = (float*)fftwf_malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(float)); check_memory(delta, "delta") printf("Reading the density grid\n"); den_grid = fopen(denfile, "rb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fread(&nx, sizeof(int), 1, den_grid); fread(&ny, sizeof(int), 1, den_grid); fread(&nz, sizeof(int), 1, den_grid); fread(&Lc, sizeof(float), 1, den_grid); for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; fread(&delta[ind], sizeof(float), 1, den_grid); delta[ind] = Growth*delta[ind]; } fclose(den_grid); } /*Construct the density grid*/ if(DEN_GRID == 1){ /*Compute the Power spectrum in the box*/ R_xi = (double *)malloc(Nk*sizeof(double)); check_memory(R_xi, "R_xi") Xi = (double *)malloc(Nk*sizeof(double)); check_memory(Xi, "Xi") pk2xi(Nk, K, P, R_xi, Xi); for(i=0;i<Nk;i++) if(R_xi[i] > (double)pow(Lx*Ly*Lz, 1.0/3.0)/2.0) Xi[i] = 0.0; xi2pk(Nk, R_xi, Xi, K, P); 
free(R_xi); free(Xi); /*Interpolate the power spectrum*/ acc = gsl_interp_accel_alloc(); spline = gsl_spline_alloc(gsl_interp_cspline, Nk); gsl_spline_init(spline, K, P, Nk); free(K); free(P); /*Allocating the density grids*/ delta = (float*)fftwf_malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(float)); check_memory(delta, "delta") deltak_tmp = (fftwf_complex *) fftwf_malloc((size_t)nx*(size_t)ny*(size_t)nz2*sizeof(fftwf_complex)); check_memory(deltak_tmp, "deltak_tmp") deltak = (fftwf_complex *) fftwf_malloc((size_t)nx*(size_t)ny*(size_t)nz2*sizeof(fftwf_complex)); check_memory(deltak, "deltak") /*Alloc the needed quantities for the random generator*/ gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc (gsl_rng_taus); gsl_rng_set(rng_ptr, seed); /*Constructing the Fourier space density grid*/ #pragma omp parallel for private(i, j, k, kx, ky, kz, kmod, std) for(i=0;i<nx;i++){ if(2*i<nx) kx = (float)i*klx; else kx = (float)(i-nx)*klx; for(j=0;j<ny;j++){ if(2*j<ny) ky = (float)j*kly; else ky = (float)(j-ny)*kly; for(k=0;k<nz2;k++){ kz = (float)k*klz; if(k == nz/2) kz = -(float)nz/2.0*klz; size_t ind = (size_t)(i*ny + j)*(size_t)nz2 + (size_t)k; kmod = sqrt(kx*kx + ky*ky + kz*kz); if(kmod == 0.0) kmod = pow(klx*kly*klz, 1.0/3.0)/4.0; std = sqrt(gsl_spline_eval(spline, kmod, acc)/2.0); /*Generate Gaussian random number with std*/ deltak[ind][0] = (float)gsl_ran_gaussian(rng_ptr, std); deltak[ind][1] = (float)gsl_ran_gaussian(rng_ptr, std); deltak_tmp[ind][0] = deltak[ind][0]; deltak_tmp[ind][1] = deltak[ind][1]; if(isnan(deltak_tmp[ind][0])) printf("Problem with deltak_tmp[%ld][0]\n", ind); if(isnan(deltak_tmp[ind][1])) printf("Problem with deltak_tmp[%ld][1]\n", ind); } } } gsl_spline_free(spline); gsl_interp_accel_free(acc); gsl_rng_free (rng_ptr); /*Execute the FFTW3 to compute the density grid in real space*/ p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak_tmp, delta, FFTW_ESTIMATE); fftwf_execute(p1); fftwf_free(deltak_tmp); /*Save the density grid*/ if(out_inter == 1 || 
out_inter == 3){ printf("Saving the density grid\n"); den_grid = fopen(denfile, "wb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fwrite(&nx, sizeof(int), 1, den_grid); fwrite(&ny, sizeof(int), 1, den_grid); fwrite(&nz, sizeof(int), 1, den_grid); fwrite(&Lc, sizeof(float), 1, den_grid); for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; fwrite(&delta[ind], sizeof(float), 1, den_grid); } fclose(den_grid); } } /*Compute the mean and std of the linear density field*/ kx = 0.0; ky = 0.0; for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; delta[ind] = delta[ind]*Normx; kx += delta[ind]*delta[ind]; ky += delta[ind]; } kx = kx/((float)nx*(float)ny*(float)nz); ky = ky/((float)nx*(float)ny*(float)nz); printf("Mean = %f and Sigma = %f\n", ky, sqrt(kx - ky*ky)); /*************************/ /* Finding the halos */ /*************************/ if(out_halos != 0){ printf("\nFinding the spherical halos!\n"); /*Alloc the flag array*/ flag = (int *)malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(int)); check_memory(flag, "flag") /*Initialize the flag array*/ for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; flag[ind] = -1; } /*Counting the number of peaks*/ np = 0; for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; den = delta[ind]; if(den > delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] && den > delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + cysum(j, -1, ny))*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] && den > delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -1, nz)]) np++; } /*Alloc the array with the peaks and final 
halos*/ peaks = (PEAKS *)malloc(np*sizeof(PEAKS)); halos = (HALOS *)malloc(np*sizeof(HALOS)); cont = 0; /*Save the position and density of each peak*/ for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; den = delta[ind]; if(den > delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] && den > delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + cysum(j, -1, ny))*(size_t)nz + (size_t)k] && den > delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] && den > delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -1, nz)]){ peaks[cont].x[0] = i; peaks[cont].x[1] = j; peaks[cont].x[2] = k; peaks[cont].den = den; cont ++; } } /*Check the new number of peaks and elements in the peaks array*/ if(cont != np){ printf("The number of peaks does not match. %ld != %ld!\n", np, cont); exit(0); } /*Sort the peaks*/ quickSort_peaks(peaks, 0, np-1); /*Grow the spherical halos around the density peaks*/ nh = 0; printf("We have %ld peaks\n", np); for(l=0;l<np;l++){ /*If this peak is already in a halo jump to teh next one*/ if(flag[(size_t)(peaks[l].x[0]*ny + peaks[l].x[1])*(size_t)nz + (size_t)peaks[l].x[2]] != -1) continue; /*Check if this peak is near to the slice used to construct the light cone*/ if(out_halos == 2 || out_halos == 3){ m = 1; for(i=-Nrep_x;i<=Nrep_x;i++) for(j=-Nrep_y;j<=Nrep_y;j++) for(k=-Nrep_z;k<=Nrep_z;k++){ /*Compute the distance for this replic*/ Pos[0] = (peaks[l].x[0] + 0.5)*Lc + Lx*i - Pobs[0]; Pos[1] = (peaks[l].x[1] + 0.5)*Lc + Ly*j - Pobs[1]; Pos[2] = (peaks[l].x[2] + 0.5)*Lc + Lz*k - Pobs[2]; dist = 0.0; for(m=0;m<3;m++) dist += Pos[m]*Pos[m]; dist = sqrt(dist); if(dist <= dist_min - Rmax || dist > dist_max + Rmax) m = 0; /*Compute the angle theta*/ cost = 0.0; for(m=0;m<3;m++) cost += Pos[m]*LoS[m]; cost = cost/dist; if(theta_min + Rmax/dist < M_PI && cost < 
cos(theta_min + Rmax/dist)) m = 0; } if(m == 0) continue; } den = peaks[l].den; den_tmp = peaks[l].den; cont = 0; cont_tmp = 1; grows_tmp = 0; /*Grows the shells up to the minimum of the barrier*/ while(den_tmp >= Barrier(Sig_grid[Ncells - 1], dc, BARRIER, EB_a, EB_b, EB_alpha)){ if(cont < cont_tmp) grows = grows_tmp; grows_tmp ++; den = den_tmp; cont = cont_tmp; den_tmp = den*(float)cont; tmp = floor(sqrt((double) grows_tmp)); if(tmp > nmin/2) tmp = nmin/2; for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) if(dist2(i, j, k) == grows_tmp){ size_t ind = (size_t)(cysum(peaks[l].x[0], i, nx)*ny + cysum(peaks[l].x[1], j, ny))*(size_t)nz + (size_t)cysum(peaks[l].x[2], k, nz); if(flag[ind] != -1) den_tmp += -Mtot; else den_tmp += delta[ind]; cont_tmp ++; } den_tmp = den_tmp/(float)cont_tmp; } /*Decrease the shells up to the correct value of the barrier*/ while(den < Barrier(Sig_grid[cont], dc, BARRIER, EB_a, EB_b, EB_alpha) && cont > 0){ den_tmp = den; cont_tmp = cont; den = den*(float)cont; tmp = floor(sqrt((double) grows)); if(tmp > nmin/2) tmp = nmin/2; for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) if(dist2(i, j, k) == grows){ size_t ind = (size_t)(cysum(peaks[l].x[0], i, nx)*ny + cysum(peaks[l].x[1], j, ny))*(size_t)nz + (size_t)cysum(peaks[l].x[2], k, nz); den -= delta[ind]; cont --; } if(cont > 0) den = den/(float)cont; if(cont < cont_tmp) grows_tmp = grows; grows --; } if(cont == 0) continue; /*Put the correct flags to the cells*/ tmp = floor(sqrt((double) grows_tmp)); for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) if(dist2(i, j, k) < grows_tmp){ size_t ind = (size_t)(cysum(peaks[l].x[0], i, nx)*ny + cysum(peaks[l].x[1], j, ny))*(size_t)nz + (size_t)cysum(peaks[l].x[2], k, nz); if(flag[ind] != -1) printf("(1): This flag != -1! 
Flag = %d and the new one is %ld\n", flag[ind], nh); flag[ind] = nh; } /*Save the halo information*/ if(cont >= Nmin){ halos[nh].cont = cont; for(i=0;i<3;i++) halos[nh].x[i] = peaks[l].x[i]; nh ++; } else{ for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) if(dist2(i, j, k) < grows_tmp){ size_t ind = (size_t)(cysum(peaks[l].x[0], i, nx)*ny + cysum(peaks[l].x[1], j, ny))*(size_t)nz + (size_t)cysum(peaks[l].x[2], k, nz); flag[ind] = -2; } } } free(peaks); free(Sig_grid); /*Find the possible number of particles in a halo sphere = (int *)malloc(Ncells*sizeof(int)); m = 0; for(l=0;l<10000;l++){ if(l%100 == 0) printf("l = %ld\n", l); tmp = floor(sqrt((float) l)); cont = 0; for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) if(dist2(i, j, k) == l) cont ++; if(cont > 0){ if(m > 0) sphere[m] = sphere[m-1] + cont; else sphere[m] = cont; m ++; } } /*Save this information den_grid = fopen("Spheres.dat", "wb"); if (den_grid == NULL) { printf("Unable to open spheres.dat\n"); exit(0); } fwrite(&m, sizeof(int), 1, den_grid); for(i=0;i<m;i++) fwrite(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid);*/ /*Read the data with the number of cells in each sphere*/ den_grid = fopen("Spheres.dat", "rb"); if (den_grid == NULL) { printf("Unable to open spheres.dat\n"); exit(0); } fread(&m, sizeof(int), 1, den_grid); sphere = (int *)malloc(m*sizeof(int)); for(i=0;i<m;i++) fread(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid); printf("We have %ld halos\n", nh); } /********************************/ /* Displacing the particles */ /********************************/ printf("\nDisplacing the particles using 1LPT!\n"); /*Define the arrays to store the final position, velocity and mass of each halo*/ float **velh, **posh, *Massh; if(out_halos != 0){ gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc (gsl_rng_taus); gsl_rng_set(rng_ptr, seed); Massh = (float *)malloc(nh*sizeof(float)); velh = (float **)malloc(nh*sizeof(float *)); posh = (float 
**)malloc(nh*sizeof(float *)); for(i=0;i<nh;i++){ velh[i] = (float *)malloc(3*sizeof(float)); posh[i] = (float *)malloc(3*sizeof(float)); for(j=0;j<3;j++){ posh[i][j] = 0.0; velh[i][j] = 0.0; } cont = Next_Count(sphere, Ncells, halos[i].cont); den_tmp = gsl_spline_eval(spline_I, halos[i].cont*Mcell, acc_I) + (gsl_spline_eval(spline_I, sphere[cont]*Mcell, acc_I) - gsl_spline_eval(spline_I, halos[i].cont*Mcell, acc_I))*gsl_rng_uniform(rng_ptr); Massh[i] = gsl_spline_eval(spline_InvI, den_tmp, acc_InvI); } free(sphere); } /*Read the displacement field*/ if(DISP_CAT == 0){ /*Open the output file for the displacement field*/ printf("Reading the displacement field\n"); disp_cat = fopen(dispfile, "rb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fread(&nx, sizeof(int), 1, disp_cat); fread(&ny, sizeof(int), 1, disp_cat); fread(&nz, sizeof(int), 1, disp_cat); fread(&Lc, sizeof(float), 1, disp_cat); /*Read the displacement and add to each halo*/ for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; if(DO_2LPT == 0){ fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; posh[tmp][0] += Growth*kx; posh[tmp][1] += Growth*ky; posh[tmp][2] += Growth*kz; velh[tmp][0] += Growth*pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kx; velh[tmp][1] += Growth*pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*ky; velh[tmp][2] += Growth*pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kz; } } else{ fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); fread(&factx, sizeof(float), 1, disp_cat); fread(&facty, sizeof(float), 1, disp_cat); fread(&factz, sizeof(float), 1, disp_cat); if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; posh[tmp][0] += Growth*(kx - Growth*3.0/7.0*pow(Omz, -1.0/143)*factx); posh[tmp][1] += Growth*(ky - 
Growth*3.0/7.0*pow(Omz, -1.0/143)*facty); posh[tmp][2] += Growth*(kz - Growth*3.0/7.0*pow(Omz, -1.0/143)*factz); velh[tmp][0] += Growth*(pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kx); //- Growth*3.0/7.0*pow(Omz, -1.0/143)*2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*factx); velh[tmp][1] += Growth*(pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*ky); //- Growth*3.0/7.0*pow(Omz, -1.0/143)*2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*facty); velh[tmp][2] += Growth*(pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kz); //- Growth*3.0/7.0*pow(Omz, -1.0/143)*2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*factz); } } } fclose(disp_cat); } /*Compute the displacement field*/ if(DISP_CAT == 1){ /*Define the arrays with the displacement field used in 2LPT*/ float *S1, *S2, *S3; if(DO_2LPT == 1){ S1 = (float *)malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(float)); S2 = (float *)malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(float)); S3 = (float *)malloc((size_t)nx*(size_t)ny*(size_t)nz*sizeof(float)); } /*Alloc deltak*/ if(DEN_GRID == 0){ deltak = (fftwf_complex *) fftwf_malloc((size_t)nx*(size_t)ny*(size_t)nz2*sizeof(fftwf_complex)); check_memory(deltak, "deltak") } /*Redefine the FFTW3 plan to compute the displacements*/ fftwf_destroy_plan(p1); p1 = NULL; p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak, delta, FFTW_ESTIMATE); /*Divide the fourier space density by the green's function*/ #pragma omp parallel for private(i, j, k, kx, ky, kz, factx, facty, factz, fact) for(i=0;i<nx;i++){ if(2*i<nx) kx = i*klx; else kx = (i-nx)*klx; factx = 1.0/90.0*(2.0*cos(3.0*kx*Lc) - 27.0*cos(2.0*kx*Lc) + 270.0*cos(kx*Lc) - 245.0)/(Lc*Lc); for(j=0;j<ny;j++){ if(2*j<ny) ky = j*kly; else ky = (j-ny)*kly; facty = 1.0/90.0*(2.0*cos(3.0*ky*Lc) - 27.0*cos(2.0*ky*Lc) + 270.0*cos(ky*Lc) - 245.0)/(Lc*Lc); for(k=0;k<nz2;k++){ kz = k*klz; if(k == nz/2) kz = -(float)nz/2.0*klz; factz = 1.0/90.0*(2.0*cos(3.0*kz*Lc) - 27.0*cos(2.0*kz*Lc) + 270.0*cos(kz*Lc) - 245.0)/(Lc*Lc); size_t ind = (size_t)(i*ny + j)*(size_t)nz2 + (size_t)k; if(kx != 
0.0 || ky != 0.0 || kz != 0.0){ fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0]/fact; deltak[ind][1] = deltak[ind][1]/fact; } else{ deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /*Compute the potential at first order*/ fftwf_execute(p1); /*Compute the first order displacements and update the position and velocity of each halo*/ if(DO_2LPT == 1){ #pragma omp parallel for private(i, j, k, tmp) for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; S1[ind] = -(1.0*delta[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); S2[ind] = -(1.0*delta[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); S3[ind] = -(1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*delta[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])*Normx/(60.0*Lc); if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; posh[tmp][0] += S1[ind]; posh[tmp][1] += S2[ind]; posh[tmp][2] += S3[ind]; velh[tmp][0] 
+= pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*S1[ind]; velh[tmp][1] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*S2[ind]; velh[tmp][2] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*S3[ind]; } } } else{ /*Open the output file for the displacement field*/ if(out_inter == 2 || out_inter == 3){ printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } #pragma omp parallel for private(i, j, k, tmp, kx, ky, kz) for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; /*save the displacement field*/ if(out_inter == 2 || out_inter == 3){ kx = -(1.0*delta[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); ky = -(1.0*delta[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); kz = -(1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*delta[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 
9.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])*Normx/(60.0*Lc); fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kx; velh[tmp][1] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*ky; velh[tmp][2] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kz; } } /*Do not save the displacements*/ else if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; kx = -(1.0*delta[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); ky = -(1.0*delta[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); kz = -(1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*delta[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])*Normx/(60.0*Lc); posh[tmp][0] += 
kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kx; velh[tmp][1] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*ky; velh[tmp][2] += pow(Omz, 5.0/9.0)*Hz/(1.0 + redshift)*kz; } } if(out_inter == 2 || out_inter == 3) fclose(disp_cat); } if(DO_2LPT == 1){ printf("Displacing the particles using 2LPT!\n"); /*Evaluating the second order contribution*/ p2 = fftwf_plan_dft_r2c_3d(nx, ny, nz, delta, deltak, FFTW_ESTIMATE); /*Compute the second order "density"*/ #pragma omp parallel for private(i, j, k) for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ phixx = (1.0*S1[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*S1[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*S1[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*S1[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*S1[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*S1[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])/(60.0*Lc); phixy = (1.0*S1[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*S1[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*S1[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*S1[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*S1[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*S1[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])/(60.0*Lc); phixz = (1.0*S1[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*S1[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*S1[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*S1[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*S1[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*S1[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])/(60.0*Lc); phiyy = (1.0*S2[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*S2[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 
45.0*S2[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*S2[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*S2[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*S2[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])/(60.0*Lc); phiyz = (1.0*S2[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*S2[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*S2[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*S2[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*S2[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*S2[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])/(60.0*Lc); phizz = (1.0*S3[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*S3[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*S3[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*S3[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*S3[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*S3[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])/(60.0*Lc); delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)k] = 1.0*(phixx*phiyy + phixx*phizz + phiyy*phizz - pow(phixy, 2.0) - pow(phixz, 2.0) - pow(phiyz, 2.0)); } /*Go to fourier space to solve the posson equation*/ fftwf_execute(p2); /*Divide the fourier space density by the green's function*/ #pragma omp parallel for private(i, j, k, kx, ky, kz, fact, factx, facty, factz) for(i=0;i<nx;i++){ if(2*i<nx) kx = i*klx; else kx = (i-nx)*klx; factx = 1.0/90.0*(2.0*cos(3.0*kx*Lc) - 27.0*cos(2.0*kx*Lc) + 270.0*cos(kx*Lc) - 245.0)/(Lc*Lc); for(j=0;j<ny;j++){ if(2*j<ny) ky = j*kly; else ky = (j-ny)*kly; facty = 1.0/90.0*(2.0*cos(3.0*ky*Lc) - 27.0*cos(2.0*ky*Lc) + 270.0*cos(ky*Lc) - 245.0)/(Lc*Lc); for(k=0;k<nz2;k++){ kz = k*klz; if(k == nz/2) kz = -(float)nz/2.0*klz; factz = 1.0/90.0*(2.0*cos(3.0*kz*Lc) - 27.0*cos(2.0*kz*Lc) + 270.0*cos(kz*Lc) - 245.0)/(Lc*Lc); size_t ind = (size_t)(i*ny + j)*(size_t)nz2 + (size_t)k; 
if(kx != 0.0 || ky != 0.0 || kz != 0.0){ fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0]/fact*Normk; deltak[ind][1] = deltak[ind][1]/fact*Normk; } else{ deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /*Come back to real space*/ fftwf_execute(p1); /*Open the output file for the displacement field*/ if(out_inter == 2 || out_inter == 3){ printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } /*Compute the second order displacements and velocities*/ #pragma omp parallel for private(i, j, k, kx, ky, kz, tmp) for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; /*save the displacement field*/ if(out_inter == 2 || out_inter == 3){ kx = (1.0*delta[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); ky = (1.0*delta[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); kz = (1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*delta[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 
45.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])*Normx/(60.0*Lc); fwrite(&S1[ind], sizeof(float), 1, disp_cat); fwrite(&S2[ind], sizeof(float), 1, disp_cat); fwrite(&S3[ind], sizeof(float), 1, disp_cat); fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; kx = -3.0/7.0*pow(Omz, -1.0/143)*kx; ky = -3.0/7.0*pow(Omz, -1.0/143)*ky; kz = -3.0/7.0*pow(Omz, -1.0/143)*kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*kx; //velh[tmp][1] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*ky; //velh[tmp][2] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*kz; } } /*Do not save the displacements*/ else if(out_halos != 0){ tmp = flag[ind]; if(tmp < 0) continue; kx = (1.0*delta[(size_t)(cysum(i, 3, nx)*ny + j)*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(cysum(i, 2, nx)*ny + j)*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(cysum(i, 1, nx)*ny + j)*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(cysum(i, -1, nx)*ny + j)*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(cysum(i, -2, nx)*ny + j)*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(cysum(i, -3, nx)*ny + j)*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); ky = (1.0*delta[(size_t)(i*ny + cysum(j, 3, ny))*(size_t)nz + (size_t)k] - 9.0*delta[(size_t)(i*ny + cysum(j, 2, ny))*(size_t)nz + (size_t)k] + 45.0*delta[(size_t)(i*ny + cysum(j, 1, ny))*(size_t)nz + (size_t)k] - 45.0*delta[(size_t)(i*nx + cysum(j, -1, ny))*(size_t)nz + (size_t)k] + 9.0*delta[(size_t)(i*ny + cysum(j, -2, ny))*(size_t)nz + (size_t)k] - 1.0*delta[(size_t)(i*ny + cysum(j, -3, ny))*(size_t)nz + (size_t)k])*Normx/(60.0*Lc); kz = (1.0*delta[(size_t)(i*ny + 
j)*(size_t)nz + (size_t)cysum(k, 3, nz)] - 9.0*delta[(size_t)(i*ny + j)*nz + (size_t)cysum(k, 2, nz)] + 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, 1, nz)] - 45.0*delta[(size_t)(i*ny + j)*(size_t)nz + cysum(k, -1, nz)] + 9.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -2, nz)] - 1.0*delta[(size_t)(i*ny + j)*(size_t)nz + (size_t)cysum(k, -3, nz)])*Normx/(60.0*Lc); kx = -3.0/7.0*pow(Omz, -1.0/143)*kx; ky = -3.0/7.0*pow(Omz, -1.0/143)*ky; kz = -3.0/7.0*pow(Omz, -1.0/143)*kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*kx; //velh[tmp][1] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*ky; //velh[tmp][2] += 2.0*pow(Omz, 6.0/11.0)*Hz/(1.0 + redshift)*kz; } } if(out_inter == 2 || out_inter == 3) fclose(disp_cat); /*Free the FFTW memory*/ fftwf_destroy_plan(p2); free(S1); free(S2); free(S3); } fftwf_destroy_plan(p1); fftwf_free(deltak); } fftwf_free(delta); if(out_collapse == 0 && out_halos != 0) free(flag); /*Compute the final position and velocity of the halos*/ if(out_halos != 0){ for(i=0;i<nh;i++){ posh[i][0] = cysumf(halos[i].x[0]*Lc + Lc/2.0, posh[i][0]/halos[i].cont, Lx); posh[i][1] = cysumf(halos[i].x[1]*Lc + Lc/2.0, posh[i][1]/halos[i].cont, Ly); posh[i][2] = cysumf(halos[i].x[2]*Lc + Lc/2.0, posh[i][2]/halos[i].cont, Lz); velh[i][0] = velh[i][0]/halos[i].cont; velh[i][1] = velh[i][1]/halos[i].cont; velh[i][2] = velh[i][2]/halos[i].cont; } } /*Saving the positions and velocities in real space*/ if(out_halos == 1){ printf("Saving the halos\n"); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { printf("Unable to open %s\n", halofile); exit(0); } fprintf(halo_cat, "%ld\n", nh); for(i=0;i<nh;i++){ fprintf(halo_cat, "%f %f %f %f %f %f %e %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], Massh[i], halos[i].cont); } fclose(halo_cat); } /*Putting galaxies in the halos*/ if(DO_HOD == 1){ printf("Saving the galaxies\n"); sprintf(halofile, 
"%s_gals.dat", argv[14]); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { printf("Unable to open %s\n", halofile); exit(0); } cont = 0; for(i=0;i<nh;i++){ /*Compute the number of central and satellite galaxies*/ if(Ncentral(Massh[i], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double) Nsatellite(Massh[i], logM0, logM1, alpha)); Ngals = Ncen + Nsat; if(Ngals == 0) continue; /*Save the central galaxy*/ if(Ncen == 1){ fprintf(halo_cat, "%f %f %f %f %f %f %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], i); cont ++; } /*Put the satellite galaxies following the NFW profile*/ if(Nsat > 0){ Rv = pow(3.0*Massh[i]/(4.0*M_PI*Dv*rhom), 1.0/3.0); C = f_c(Massh[i], (float) Mstar, z); A = log(1.0 + C) - C/(1.0 + C); } for(j=0;j<Nsat;j++){ phi = 2.0*M_PI*gsl_rng_uniform(rng_ptr); theta = M_PI*gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(posh[i][0], r*sin(theta)*cos(phi), Lx); ky = cysumf(posh[i][1], r*sin(theta)*sin(phi), Ly); kz = cysumf(posh[i][2], r*cos(theta), Lz); fprintf(halo_cat, "%f %f %f ", kx, ky, kz); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fprintf(halo_cat, "%f %f %f %d\n", kx, ky, kz, i); cont ++; } } fclose(halo_cat); n_bar = cont/(Lx*Ly*Lz); printf("n_bar = %f\n", n_bar); } /********************************/ /*Put the halos in the lightcone*/ /********************************/ if(out_halos == 2 || out_halos == 3){ printf("\nPutting the halos in the light cone!\n"); printf("The code is using (%d, %d, %d) replicas to construct the light cone.\n", Nrep_x, Nrep_y, Nrep_z); printf("This snapshot is in the range %f - %f [Mpc/h] with theta_min = %f.\n", dist_min, dist_max, theta_min); /*Open the light cone file*/ light_cat = fopen(lightfile, "wb"); if (light_cat == NULL) { printf("Unable to open %s\n", lightfile); exit(0); } cont = 0; fwrite(&cont, sizeof(long), 1, light_cat); /*Run over all the halos and save then in the light cone 
file*/ for(l=0;l<nh;l++){ for(i=-Nrep_x;i<=Nrep_x;i++) for(j=-Nrep_y;j<=Nrep_y;j++) for(k=-Nrep_z;k<=Nrep_z;k++){ /*Compute the distance for this replic*/ Pos[0] = posh[l][0] + Lx*i - Pobs[0]; Pos[1] = posh[l][1] + Ly*j - Pobs[1]; Pos[2] = posh[l][2] + Lz*k - Pobs[2]; dist = 0.0; for(m=0;m<3;m++) dist += Pos[m]*Pos[m]; dist = sqrt(dist); if(out_halos == 3){ /*Compute the distance in redshift space*/ vr = 0.0; for(m=0;m<3;m++) vr += velh[l][m]*Pos[m]; vr = vr/dist; for(m=0;m<3;m++) Pos[m] = Pos[m] + vr/Hz*(1.0 + redshift)*Pos[m]/dist; dist = dist + vr/Hz*(1.0 + redshift); } if(dist <= dist_min || dist > dist_max) continue; /*Compute the angle theta*/ cost = 0.0; for(m=0;m<3;m++) cost += Pos[m]*LoS[m]; cost = cost/dist; if(cost < cos_min) continue; /*Save the information about this halo*/ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); fwrite(&Massh[l], sizeof(float), 1, light_cat); cont ++; /*Put galaxies in this halo (one type)*/ if(DO_HOD == 1){ /*Compute the number of central and satellite galaxies*/ if(Ncentral(Massh[l], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double) Nsatellite(Massh[l], logM0, logM1, alpha)); Ngals = Ncen + Nsat; /*Save the total number of galaxies*/ fwrite(&Ngals, sizeof(int), 1, light_cat); /*Save the central galaxy*/ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); /*Put the satellite galaxies following the NFW profile*/ if(Nsat > 0){ Rv = pow(3.0*Massh[l]/(4.0*M_PI*Dv*rhom), 1.0/3.0); C = f_c(Massh[l], 
(float) Mstar, z); A = log(1.0 + C) - C/(1.0 + C); } for(m=0;m<Nsat;m++){ phi = 2.0*M_PI*gsl_rng_uniform(rng_ptr); theta = M_PI*gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(Pos[0], r*sin(theta)*cos(phi), Lx); ky = cysumf(Pos[1], r*sin(theta)*sin(phi), Ly); kz = cysumf(Pos[2], r*cos(theta), Lz); fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); } } } } rewind(light_cat); fwrite(&cont, sizeof(long), 1, light_cat); fclose(light_cat); if(out_collapse == 1){ /*Open the file to save the information about the collapsed particles*/ collapse_cat = fopen(collapsefile, "wb"); if (collapse_cat == NULL) { printf("Unable to open %s\n", collapsefile); exit(0); } /*Save the information about the colapsed particles*/ int a, b, c; cont = 0; fwrite(&cont, sizeof(long), 1, collapse_cat); #pragma omp parallel for private(i, j, k, a, b, c, Pos, dist, cost, tmp) for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++){ size_t ind = (size_t)(i*ny + j)*(size_t)nz + (size_t)k; for(a=-Nrep_x;a<=Nrep_x;a++) for(b=-Nrep_y;b<=Nrep_y;b++) for(c=-Nrep_z;c<=Nrep_z;c++){ /*Compute the distance for this replic*/ Pos[0] = i*Lc + Lc/2.0 + Lx*a; Pos[1] = j*Lc + Lc/2.0 + Ly*b; Pos[2] = k*Lc + Lc/2.0 + Lz*c; dist = 0.0; for(m=0;m<3;m++) dist += Pos[m]*Pos[m]; dist = sqrt(dist); if(dist <= dist_min || dist > dist_max) continue; /*Compute the angle theta*/ cost = 0.0; for(m=0;m<3;m++) cost += Pos[m]*LoS[m]; cost = cost/dist; if(cost < cos_min) continue; tmp = flag[ind]; cont ++; fwrite(&ind, sizeof(size_t), 1, collapse_cat); fwrite(&tmp, sizeof(int), 1, collapse_cat); fwrite(&redshift, sizeof(float), 1, collapse_cat); } } rewind(collapse_cat); fwrite(&cont, sizeof(long), 1, collapse_cat); fclose(collapse_cat); free(flag); } } 
/*******************/ /* Free the memory */ /*******************/ gsl_spline_free(spline_I); gsl_spline_free(spline_InvI); gsl_interp_accel_free(acc_I); gsl_interp_accel_free(acc_InvI); if(out_halos != 0){ free(Massh); for(i=0;i<nh;i++){ free(velh[i]); free(posh[i]); } /*Free the rest*/ free(velh); free(posh); free(halos); gsl_rng_free(rng_ptr); } return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <fftw3.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include "fftlog.h" #define check_memory(p, name) if(p == NULL){printf("Problems to alloc %s.\n", name); return 0;} #define Lc_MAX 1.0e+2 #define Mc_MIN 1.0e+5 #define M_max 6.0e+15 /* Structure for the peaks in the density field */ typedef struct Halos_centers { int x[3]; /* Index of the halo center */ float den; /* Density of the halo's central cell */ } PEAKS; /* Structure for the final halos */ typedef struct Halos { int x[3]; /* Index of central cell of teh halo */ int cont; /* Number of cells in the halo */ } HALOS; /* Barrier used for the halo definition */ float Barrier(float S, float dc, char barrier, float a, float b, float alpha) { float resp; /* The Press-Schechter barrier */ if (barrier == 0) resp = dc; /* The Sheth-Tormen barrier */ else if (barrier == 1) resp = sqrt(a) * dc * (1.0 + b * pow(S / (a * dc * dc), alpha)); return resp; } /* Partition function for the quicksort */ long int partition_peaks(PEAKS a[], long l, long r) { long i, j, k; PEAKS pivot, t; pivot.den = a[l].den; for (k = 0; k < 3; k++) pivot.x[k] = a[l].x[k]; i = l; j = r + 1; while (1) { do ++i; while (a[i].den >= pivot.den && i < r); do --j; while (a[j].den < pivot.den); if (i >= j) break; t.den = a[i].den; a[i].den = a[j].den; a[j].den = t.den; for (k = 0; k < 3; k++) { t.x[k] = a[i].x[k]; a[i].x[k] = a[j].x[k]; a[j].x[k] = t.x[k]; } } t.den = a[l].den; a[l].den = a[j].den; a[j].den = t.den; for (k = 0; k < 3; k++) { t.x[k] = a[l].x[k]; a[l].x[k] = a[j].x[k]; a[j].x[k] = t.x[k]; } return j; } /* The quicksort algorithm to sort the peaks list */ void quickSort_peaks(PEAKS a[], long l, long r) { long j; if (l < r) { //divide and conquer j = partition_peaks(a, l, r); quickSort_peaks(a, l, j - 1); quickSort_peaks(a, j + 1, r); } } /* Define the distance between two cells */ long int 
dist2(long int i, long int j, long int k) { long int resp; resp = i * i + j * j + k * k; return resp; } /* Define the cyclic sum for floats */ float cysumf(float x, float y, float L) { float resp; resp = x + y; if (resp >= L) resp -= L; if (resp < 0) resp += L; return resp; } /* Define the cyclic sum */ int cysum(int i, int j, int nd) { int resp; resp = i + j; if (resp >= nd) resp -= nd; if (resp < 0) resp += nd; return resp; } /* Window function in the Fourier space */ double W(double k, double R) { double resp; resp = 3.0 / (pow(k * R, 2)) * (sin(k * R) / (k * R) - cos(k * R)); return resp; } /* Evaluate the square root of matter variance */ double calc_sigma(double *k, double *P, int Nk, double R) { int i; double resp; resp = 0.0; for (i = 0; i < Nk - 1; i++) resp += (k[i + 1] - k[i]) / 2.0 * (P[i] * pow(k[i] * W(k[i], R), 2) + P[i + 1] * pow(k[i + 1] * W(k[i + 1], R), 2)); return resp / (2.0 * M_PI * M_PI); } /* Evaluate the mass function for a given sigma */ double fh(double sigma, int model, double dc) { double resp, nu; double B, d, e, f, g; //Press - Schechter if (model == 0) { nu = dc / sigma; resp = sqrt(2.0 / M_PI) * nu * exp(-nu * nu / 2.0); } //Tinker Delta = 300 else if (model == 1) { B = 0.466; d = 2.06; e = 0.99; f = 0.48; g = 1.310; resp = B * (pow(sigma / e, -d) + pow(sigma, -f)) * exp(-g / (sigma * sigma)); } return resp; } /* Find the index of the next sphere */ int Next_Count(int *spheres, int Ncells, int count) { int i, resp; for (i = 0; i < Ncells; i++) if (spheres[i] == count) { resp = i + 1; break; } return resp; } /* Halo concentration */ float f_c(float Mv, float Mstar, float z) { float resp; resp = 9.0 / (1.0 + z) * pow(Mv / Mstar, -0.13); return resp; } /* Generate a random number from 0 to Rv following the NFW profile */ float Generate_NFW(float rv, float c, float A, int seed) { float Int, rs, r, rtmp; gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc(gsl_rng_taus); gsl_rng_set(rng_ptr, seed); Int = gsl_rng_uniform(rng_ptr); rs = rv / c; r = 
gsl_rng_uniform(rng_ptr); rtmp = r + 1.0; while (fabs(r - rtmp) > 0.001) { rtmp = r; r = r - ((log(1.0 + r * c) - r * c / (1.0 + r * c) - A * Int) * pow(1.0 + r * c, 2)) / (c * (2.0 * r * c + r * r * c * c)); } gsl_rng_free(rng_ptr); return r * rv; } /* Mean value of central galaxies */ float Ncentral(float M, float logMmin, float siglogM) { float resp; resp = 0.5 * (1.0 + erf((log10(M) - logMmin) / siglogM)); return resp; } /* Mean value of satellite galaxies */ float Nsatellite(float M, float logM0, float logM1, float alpha) { float resp; resp = pow((M - pow(10.0, logM0)) / pow(10.0, logM1), alpha); return resp; } int main(int argc, char *argv[]) { FILE *power, *den_grid, *halo_cat, *disp_cat, *light_cat, *collapse_cat; char powerfile[100], denfile[100], halofile[100], dispfile[100], lightfile[100], collapsefile[100]; char DO_2LPT, BARRIER, out_inter, out_halos, out_collapse, DEN_GRID, DISP_CAT, DO_HOD; int i, j, k, nx, ny, nz, nz2, Nmin, N_cores, Nk, Nr, Ncells, seed, cont_tmp, grows, grows_tmp, tmp, nmin, m, m_tmp, nsnap; long np, cont, nh, l; float Lc, Om0, redshift, Growth, dc, EB_a, EB_b, EB_alpha, rhoc, Hz, Omz, rhomz, Mtot, Mcell, Lx, Ly, Lz, klx, kly, klz, Normx, Normk, Dv, kx, ky, kz, kmod, *Sig_grid, sigtmp, std, den, den_tmp, factx, facty, factz, fact, phixx, phixy, phixz, phiyy, phiyz, phizz, Pobs[3], LoS[3], dist_min, dist_max, theta_min, cos_min; double *K, *P, *R, *M, *Sig, Rmin, Rmax, *R_xi, *Xi; fftwf_plan p1, p2; fftwf_complex *deltak, *deltak_tmp; float *delta; int *flag, *sphere; PEAKS *peaks, *tmpp; HALOS *halos; if (argc != 35) { printf("\nWrong number of arguments.\n"); printf("arg1: Name of the power spectrum file.\n"); printf("arg2: Size (in Mpc/h) or mass (in M_odot/h) of each cell.\n"); printf("arg3-5: Number of cells along each direction.\n"); printf("arg6: Some seed for the random number generator.\n"); printf("arg7: Use the 2LPT to move the halos? 
Yes (1) or No (0).\n"); printf("arg8: The Value of Omega_m today.\n"); printf("arg9: The readshift z.\n"); printf("arg10: The rate between the growth function at the final resdshit and at the redshift of the input power spectrum.\n"); printf("arg11: The value of critical density delta _{c}. Put 0 to use the fit.\n"); printf("arg12: The minimum number of partiles in a halo of the final catalogue.\n"); printf("arg13: The number of cores to use in the parallel parts.\n"); printf("arg14: Prefix for the outputs.\n"); printf("arg15: Which barrier would you like to use to find the halos?\n\tThe statical barrier (SB) (0);\n\tThe ellipsoidal barrier (EB) (1).\n"); printf("arg16: Which intermediate results would you like to save?:\n\tNo one (0);\n\tThe gaussian density grid (1);\n\tThe particles displaced with LPT (2);\n\tBoth (3).\n"); printf("arg17: How do you want the final halo catalogue?\n\tNo halo catalogue (0);\n\tThe positions and velocities in the real space (1);\n\tThe positions and velocities in the real space light cone (2);\n\tThe positions and velocities in redshift space light cone(3).\n"); printf("arg18-20: The three parameters for the ellipsoidal barrier: a, b and alpha.\n"); printf("arg21: Read the density grid (0) or compute it (1)?\n"); printf("arg22: Read the displacement field (0) or compute it (1)?\n"); printf("arg23-25: Position of the observer in units of the box size.\n"); printf("arg26-28: Direction of the line of sight.\n"); printf("arg29-30: Minimum and maximum comoving distance of the halos in this snapshot in the light cone.\n"); printf("arg31: Angular aperture of the light cone in units of pi.\n"); printf("arg32: Save the information about the collapsed particles in the light cone? 
Yes (1) or No (0).\n"); printf("arg33: Populate the halos with a HOD?\n\tNo (0);\n\tYes, with a single type of galaxy (1)\n\tYes, with multiple types of galaxies(2).\n"); printf("arg34: Number of this snapshot.\n"); exit(0); } /* Get the name of all files */ sprintf(powerfile, "%s", argv[1]); sprintf(denfile, "%s_den.dat", argv[14]); sprintf(halofile, "%s_halos.dat", argv[14]); sprintf(dispfile, "%s_disp.dat", argv[14]); /* * Parameters with specifications of the box and options for this * simulation */ Lc = atof(argv[2]); //Size or mass of each cell nx = atoi(argv[3]); //Number of cells along the x - direction ny = atoi(argv[4]); //Number of cells along the y - direction nz = atoi(argv[5]); //Number of cells along the z - direction seed = atoi(argv[6]); //Seed for the random generator(same seed gives the same final catalogue) DO_2LPT = (char)atoi(argv[7]); //Parameter with the information about the use(or not) of second order lagrangian perturbation theory Nmin = atoi(argv[12]); //Number of particles in the smaller final halo N_cores = atoi(argv[13]); //Number of cores used by openmp in the parallel parts BARRIER = (char)atoi(argv[15]); //Parameter with the information about the utilization(or not) of the EB out_inter = (char)atoi(argv[16]); //Parameter with the information about which intermediate results must be output out_halos = (char)atoi(argv[17]); //Parameter with the information about what to save in the final halo catalogue out_collapse = (char)atoi(argv[32]); //Parameter with the information about the collapsed particles in the light cone DEN_GRID = (char)atoi(argv[21]); //Compute a new density field(1) or just read it from a file(0) ? DISP_CAT = (char)atoi(argv[22]); //Compute the displacement field(1) or just read it from a file(0) ? DO_HOD = (char)atoi(argv[33]); //Populate the halos with no galaxies(0), one type of galaxy(1) or multiple types(2) ? 
/* Some physical parametrs used in this simulation */ Om0 = atof(argv[8]); //Omega_m value today(z = 0) redshift = atof(argv[9]); //Redshift of the final catalogues Growth = atof(argv[10]); //Ratio between the growth function at the final redshift and the redshift of the inpur power spectrum dc = atof(argv[11]); //Value of the critical density for the halo formation linearly extrapoleted using linear theory to the redshift of the final catalogues /* Parameters for the EB */ EB_a = atof(argv[18]); //Parameter a of the EB EB_b = atof(argv[19]); //Parameter b of the EB EB_alpha = atof(argv[20]); //Parameter alpha of the EB /* Parameters for the construction of the light cone */ Pobs[0] = atof(argv[23]); //Position x of the observer in units of the box size Pobs[1] = atof(argv[24]); //Position y of the observer in units of the box size Pobs[2] = atof(argv[25]); //Position z of the observer in units of the box size LoS[0] = atof(argv[26]); //Component x of the direction of the line of sight LoS[1] = atof(argv[27]); //Component y of the direction of the line of sight LoS[2] = atof(argv[28]); //Component z of the direction of the line of sight /* Normalize the LoS vector */ kmod = 0.0; for (i = 0; i < 3; i++) kmod += LoS[i] * LoS[i]; for (i = 0; i < 3; i++) LoS[i] = LoS[i] / sqrt(kmod); dist_min = atof(argv[29]); //Minimum comoving distance of this slice dist_max = atof(argv[30]); //Maximum comoving distance of this slice theta_min = atof(argv[31]) * M_PI; //Minimum angle theta cos_min = cos(theta_min); //Cossine of the minimum angle theta nsnap = atoi(argv[34]); //Number of this snapshot sprintf(lightfile, "%s_%d_LightCone.dat", argv[14], nsnap); sprintf(collapsefile, "%s_%d_Collapse.dat", argv[14], nsnap); /* Some derived parameters used in this simulation */ rhoc = 2.775e+11; //Critical density in unitis of M_odot / Mpc * h ^ 2 Hz = 100.0 * sqrt(Om0 * pow(1.0 + redshift, 3.0) + (1.0 - Om0)); //Hubble constant at the final redshift Omz = Om0 * pow(1.0 + redshift, 3.0) / 
(Om0 * pow(1.0 + redshift, 3.0) + (1.0 - Om0)); //Matter contrast density at the final redshift rhomz = Om0 * rhoc; //Matter density at the final redshift Dv = (18 * M_PI * M_PI + 82.0 * (Omz - 1.0) - 39.0 * pow(Omz - 1.0, 2.0)) / Omz; //Overdensity used to put galaxies in the halos if (Lc < Lc_MAX) //If the size of each cell was given compute the mass of each cell Mcell = rhomz * pow(Lc, 3.0); else if (Lc > Mc_MIN) { //If the mass of each cell was given compute the size of each cell Mcell = Lc; Lc = pow(Mcell / rhomz, 1.0 / 3.0); } else { //Notify an unexpected behavior and exit printf("A cell larger than %f [Mpc/h] or with a mass smaller than %e [M_odot/h] is not expected. Please, change this value or change the definition of Lc_MAX and Mc_MIN in the code.\n", Lc_MAX, Mc_MIN); exit(0); } Lx = Lc * nx; //Compute the size of the box along the x - direction Ly = Lc * ny; //Compute the size of the box along the y - direction Lz = Lc * nz; //Compute the size of the box along the z - direction Mtot = rhomz * Lx * Ly * Lz; //Compute the total mass in the box klx = 2.0 * M_PI / Lx; //Compute the fundamental frequency in the x - direction kly = 2.0 * M_PI / Ly; //Compute the fundamental frequency in the y - direction klz = 2.0 * M_PI / Lz; //Compute the fundamental frequency in the z - direction Normx = 1.0 / sqrt(Lx * Ly * Lz); //Compute the normalization needed when aplyed the FFTW3 from k to x space Normk = sqrt(Lx * Ly * Lz) / (nx * ny * nz); //Compute the normalization needed when aplyed the FFTW3 from x to k space nz2 = nz / 2 + 1; //Quantity used to alloc the complex arrays used in the FFTW3 nmin = nx; //Determine the smaller direction if (nmin > ny) nmin = ny; if (nmin > nz) nmin = nz; /* * Compute the number of repetitions of this box to construct the light * cone */ float Pos[3], dist, cost, vr, Mass; int Nrep_x, Nrep_y, Nrep_z; if (out_halos == 2 || out_halos == 3) { Nrep_x = floor(dist_max / Lx) + 1; Nrep_y = floor(dist_max / Ly) + 1; Nrep_z = floor(dist_max / 
Lz) + 1; } /* Parameters of the HOD model */ int Ngals, Ncen, Nsat; float r, phi, theta, Rv, C, A; float logMmin, siglogM, logM0, logM1, alpha; logMmin = 12.44005264; siglogM = 0.79560376; logM0 = 11.98154109; logM1 = 12.99600074; alpha = 1.13717828; /* Check some inputs before to start */ if (out_inter == 0 && out_halos == 0) { printf("You need to choose something to output! arg16, arg17 and/or arg18 must be >0!\n"); exit(0); } if (nx < 0 || ny < 0 || nz < 0) { printf("You are trying to use n = (%d, %d, %d) and it is not possible!\n", nx, ny, nz); exit(0); } if (DO_2LPT < 0 || DO_2LPT > 1) { printf("You are trying to use DO_2LPT = %d and it is not possible! Setting DO_2LPT = 0.\n", DO_2LPT); DO_2LPT = 0; } if (Growth <= 0.0) { printf("You gave a value of the ratio between the growths of %f and it is not physical!\n", Growth); exit(0); } if (Nmin < 0) { printf("You gave a negative number for the number of particles in the smaller halo (%d). Settin it in 1.\n", Nmin); Nmin = 1; } if (N_cores < 0) { printf("You gave a negative number for the number of cores (%d). Settin it in 1.\n", N_cores); N_cores = 1; } if (BARRIER != 0 && BARRIER != 1) { printf("You need to chose a valid barrier for the void detection! Your choice were %d.\n", BARRIER); exit(0); } if (Om0 > 1.0 || Om0 < 0.0) { printf("Your Omega _{m} = %f! Put some valid value between 0.0 and 1.0.\n", Om0); exit(0); } if (dc < 0.0) { printf("Your delta_{c} = %f < 0. Using the fit.\n", dc); dc = 1.686 * pow(Omz, 0.0055); } if (dc == 0.0) dc = 1.686 * pow(Omz, 0.0055); if (out_halos > 1 && theta_min > 1.0) { printf("Theta min must be equal or smaller than 1! 
Setting it to 1.\n"); theta_min = 1.0; cos_min = -1.0; } if (out_halos > 1 && LoS[0] == 0.0 && LoS[1] == 0.0 && LoS[2] == 0.0) { printf("You must give a non vanishing vector for the direction of the line of sight!\n"); exit(0); } if (out_collapse == 1 && out_halos < 2) { printf("It is not possible to save the information about the collapsed particles without the creation of a light cone. Ignoring this parameter.\n"); out_collapse = 0; } printf("\nRunning the ExSHalos!\n\ Omega_m = %.3f, z = %.3f, Growth = %.3f, H = %.2f, d_c = %.3f and Delta_virial = %.1f\n\ L = (%.5f, %.5f, %.5f), N_cells = (%d, %d, %d), M_tot = %.5e, M_cell = %.5e and seed = %d.\n", Omz, redshift, Growth, Hz, dc, Dv, Lx, Ly, Lz, nx, ny, nz, Mtot, Mcell, seed); omp_set_num_threads(N_cores); //Set the number of cores used by the openmp /**************************************/ /* Constructing the density grids */ /**************************************/ printf("\nConstructing the density grid in real and fourier space!\n"); /* Opennning the power spectrum file */ power = fopen(powerfile, "r"); if (power == NULL) { printf("Unable to open %s\n", powerfile); exit(0); } /* Measuring the number of k's */ Nk = -1; while (!feof(power)) { fscanf(power, "%f %f", &kx, &ky); Nk++; } rewind(power); /* Reading the power spectrum */ K = (double *)malloc(Nk * sizeof(double)); check_memory(K, "K") P = (double *)malloc(Nk * sizeof(double)); check_memory(P, "P") for (i = 0; i < Nk; i++) { fscanf(power, "%lf %lf", &K[i], &P[i]); P[i] = pow((double)Growth, 2.0) * P[i]; } fclose(power); /* Evaluating the Sigma(R) */ Nr = Nk; R = (double *)malloc(Nr * sizeof(double)); check_memory(R, "R") M = (double *)malloc(Nr * sizeof(double)); check_memory(M, "M") Sig = (double *)malloc(Nr * sizeof(double)); check_memory(Sig, "Sig") Rmin = (double)pow(Mcell * 0.9 * 3.0 / (4.0 * M_PI * rhomz), 1.0 / 3.0); Rmax = (double)pow(M_max * 3.0 / (4.0 * M_PI * rhomz), 1.0 / 3.0); for (i = 0; i < Nr; i++) { R[i] = pow(10, log10(Rmin) + i * 
(log10(Rmax) - log10(Rmin)) / (Nr - 1)); M[i] = 4.0 / 3.0 * M_PI * (double)rhomz *pow(R[i], 3); } for (i = 0; i < Nr; i++) Sig[i] = sqrt(calc_sigma(K, P, Nk, R[i])); /* Interpolating the Sigma(M) */ gsl_interp_accel *acc = gsl_interp_accel_alloc(); gsl_spline *spline = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline, M, Sig, Nr); /* Evaluate the integral of the mass function */ double *Int; Int = (double *)malloc(Nr * sizeof(double)); check_memory(Int, "Int") Int[0] = 0.0; for (i = 1; i < Nr; i++) Int[i] = Int[i - 1] - (log(Sig[i]) - log(Sig[i - 1])) / 2.0 * (fh(Sig[i], 1, (double)dc) / pow(R[i], -3.0) + fh(Sig[i - 1], 1, (double)dc) / pow(R[i - 1], -3.0)); /* * Interpolate the integral of the mass function as function of mass and * its inverse */ gsl_interp_accel *acc_I = gsl_interp_accel_alloc(); gsl_interp_accel *acc_InvI = gsl_interp_accel_alloc(); gsl_spline *spline_I = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline *spline_InvI = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline_I, M, Int, Nr); gsl_spline_init(spline_InvI, Int, M, Nr); free(Int); /* Compute the Sigma as function of the number of cells in the halo */ Ncells = floor(M_max / Mcell); Sig_grid = (float *)malloc(Ncells * sizeof(float)); check_memory(Sig_grid, "Sig_grid") Sig_grid[0] = 1e+30; for (i = 1; i < Ncells; i++) Sig_grid[i] = pow(gsl_spline_eval(spline, i * Mcell, acc), 2.0); gsl_spline_free(spline); gsl_interp_accel_free(acc); free(R); free(M); free(Sig); /* Read the density grid */ if (DEN_GRID == 0) { delta = (float *)fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); check_memory(delta, "delta") printf("Reading the density grid\n"); den_grid = fopen(denfile, "rb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fread(&nx, sizeof(int), 1, den_grid); fread(&ny, sizeof(int), 1, den_grid); fread(&nz, sizeof(int), 1, den_grid); fread(&Lc, sizeof(float), 1, den_grid); for (i = 0; i < nx; i++) for (j = 0; j < 
ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; fread(&delta[ind], sizeof(float), 1, den_grid); delta[ind] = Growth * delta[ind]; } fclose(den_grid); } /* Construct the density grid */ if (DEN_GRID == 1) { /* Compute the Power spectrum in the box */ R_xi = (double *)malloc(Nk * sizeof(double)); check_memory(R_xi, "R_xi") Xi = (double *)malloc(Nk * sizeof(double)); check_memory(Xi, "Xi") pk2xi(Nk, K, P, R_xi, Xi); for (i = 0; i < Nk; i++) if (R_xi[i] > (double)pow(Lx * Ly * Lz, 1.0 / 3.0) / 2.0) Xi[i] = 0.0; xi2pk(Nk, R_xi, Xi, K, P); free(R_xi); free(Xi); /* Interpolate the power spectrum */ acc = gsl_interp_accel_alloc(); spline = gsl_spline_alloc(gsl_interp_cspline, Nk); gsl_spline_init(spline, K, P, Nk); free(K); free(P); /* Allocating the density grids */ delta = (float *)fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); check_memory(delta, "delta") deltak_tmp = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak_tmp, "deltak_tmp") deltak = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak, "deltak") /* Alloc the needed quantities for the random generator */ gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc(gsl_rng_taus); gsl_rng_set(rng_ptr, seed); /* Constructing the Fourier space density grid */ for (i = 0; i < nx; i++) { if (2 * i < nx) kx = (float)i *klx; else kx = (float)(i - nx) * klx; for (j = 0; j < ny; j++) { if (2 * j < ny) ky = (float)j *kly; else ky = (float)(j - ny) * kly; for (k = 0; k < nz2; k++) { kz = (float)k *klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; kmod = sqrt(kx * kx + ky * ky + kz * kz); if (kmod == 0.0) kmod = pow(klx * kly * klz, 1.0 / 3.0) / 4.0; std = sqrt(gsl_spline_eval(spline, kmod, acc) / 2.0); /* Generate Gaussian random number with std */ deltak[ind][0] = 
(float)gsl_ran_gaussian(rng_ptr, std); deltak[ind][1] = (float)gsl_ran_gaussian(rng_ptr, std); deltak_tmp[ind][0] = deltak[ind][0]; deltak_tmp[ind][1] = deltak[ind][1]; if (isnan(deltak_tmp[ind][0])) printf("Problem with deltak_tmp[%ld][0]\n", ind); if (isnan(deltak_tmp[ind][1])) printf("Problem with deltak_tmp[%ld][1]\n", ind); } } } gsl_spline_free(spline); gsl_interp_accel_free(acc); gsl_rng_free(rng_ptr); /* Execute the FFTW3 to compute the density grid in real space */ p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak_tmp, delta, FFTW_ESTIMATE); fftwf_execute(p1); fftwf_free(deltak_tmp); /* Save the density grid */ if (out_inter == 1 || out_inter == 3) { printf("Saving the density grid\n"); den_grid = fopen(denfile, "wb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fwrite(&nx, sizeof(int), 1, den_grid); fwrite(&ny, sizeof(int), 1, den_grid); fwrite(&nz, sizeof(int), 1, den_grid); fwrite(&Lc, sizeof(float), 1, den_grid); for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; fwrite(&delta[ind], sizeof(float), 1, den_grid); } fclose(den_grid); } } /* Compute the mean and std of the linear density field */ kx = 0.0; ky = 0.0; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; delta[ind] = delta[ind] * Normx; kx += delta[ind] * delta[ind]; ky += delta[ind]; } kx = kx / ((float)nx * (float)ny * (float)nz); ky = ky / ((float)nx * (float)ny * (float)nz); printf("Mean = %f and Sigma = %f\n", ky, sqrt(kx - ky * ky)); /*************************/ /* Finding the halos */ /*************************/ if (out_halos != 0) { printf("\nFinding the spherical halos!\n"); /* Alloc the flag array */ flag = (int *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(int)); check_memory(flag, "flag") /* Initialize the flag array */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) 
for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; flag[ind] = -1; } /* Counting the number of peaks */ np = 0; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; den = delta[ind]; if (den > delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -1, nz)]) np++; } /* Alloc the array with the peaks and final halos */ peaks = (PEAKS *) malloc(np * sizeof(PEAKS)); halos = (HALOS *) malloc(np * sizeof(HALOS)); cont = 0; /* Save the position and density of each peak */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; den = delta[ind]; if (den > delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -1, nz)]) { peaks[cont].x[0] = i; peaks[cont].x[1] = j; peaks[cont].x[2] = k; peaks[cont].den = den; cont++; } } /* Check the new number of peaks and elements in the peaks array */ if (cont != np) { printf("The number of peaks does not match. 
%ld != %ld!\n", np, cont); exit(0); } /* Sort the peaks */ quickSort_peaks(peaks, 0, np - 1); /* Grow the spherical halos around the density peaks */ nh = 0; printf("We have %ld peaks\n", np); for (l = 0; l < np; l++) { /* If this peak is already in a halo jump to teh next one */ if (flag[(size_t) (peaks[l].x[0] * ny + peaks[l].x[1]) * (size_t) nz + (size_t) peaks[l].x[2]] != -1) continue; /* * Check if this peak is near to the slice used to construct the * light cone */ if (out_halos == 2 || out_halos == 3) { m = 1; for (i = -Nrep_x; i <= Nrep_x; i++) for (j = -Nrep_y; j <= Nrep_y; j++) for (k = -Nrep_z; k <= Nrep_z; k++) { /* Compute the distance for this replic */ Pos[0] = (peaks[l].x[0] + 0.5) * Lc + Lx * i - Pobs[0]; Pos[1] = (peaks[l].x[1] + 0.5) * Lc + Ly * j - Pobs[1]; Pos[2] = (peaks[l].x[2] + 0.5) * Lc + Lz * k - Pobs[2]; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (dist <= dist_min - Rmax || dist > dist_max + Rmax) m = 0; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (theta_min + Rmax / dist < M_PI && cost < cos(theta_min + Rmax / dist)) m = 0; } if (m == 0) continue; } den = peaks[l].den; den_tmp = peaks[l].den; cont = 0; cont_tmp = 1; grows_tmp = 0; /* Grows the shells up to the minimum of the barrier */ while (den_tmp >= Barrier(Sig_grid[Ncells - 1], dc, BARRIER, EB_a, EB_b, EB_alpha)) { if (cont < cont_tmp) grows = grows_tmp; grows_tmp++; den = den_tmp; cont = cont_tmp; den_tmp = den * (float)cont; tmp = floor(sqrt((double)grows_tmp)); if (tmp > nmin / 2) tmp = nmin / 2; for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) == grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); if (flag[ind] != -1) den_tmp += -Mtot; else den_tmp += delta[ind]; cont_tmp++; } den_tmp = den_tmp / 
(float)cont_tmp; } /* Decrease the shells up to the correct value of the barrier */ while (den < Barrier(Sig_grid[cont], dc, BARRIER, EB_a, EB_b, EB_alpha) && cont > 0) { den_tmp = den; cont_tmp = cont; den = den * (float)cont; tmp = floor(sqrt((double)grows)); if (tmp > nmin / 2) tmp = nmin / 2; for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) == grows) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); den -= delta[ind]; cont--; } if (cont > 0) den = den / (float)cont; if (cont < cont_tmp) grows_tmp = grows; grows--; } if (cont == 0) continue; /* Put the correct flags to the cells */ tmp = floor(sqrt((double)grows_tmp)); for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) < grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); if (flag[ind] != -1) printf("(1): This flag != -1! 
Flag = %d and the new one is %ld\n", flag[ind], nh); flag[ind] = nh; } /* Save the halo information */ if (cont >= Nmin) { halos[nh].cont = cont; for (i = 0; i < 3; i++) halos[nh].x[i] = peaks[l].x[i]; nh++; } else { for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) < grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); flag[ind] = -2; } } } free(peaks); free(Sig_grid); /* * Find the possible number of particles in a halo sphere = (int * *)malloc(Ncells*sizeof(int)); m = 0; for(l=0;l<10000;l++){ * if(l%100 == 0) printf("l = %ld\n", l); * * tmp = floor(sqrt((float) l)); cont = 0; * * for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) * if(dist2(i, j, k) == l) cont ++; * * if(cont > 0){ if(m > 0) sphere[m] = sphere[m-1] + cont; else * phere[m] = cont; * * m ++; } } * /*Save this information den_grid = fopen("Spheres.dat", "wb"); if * (den_grid == NULL) { printf("Unable to open spheres.dat\n"); * exit(0); } * * fwrite(&m, sizeof(int), 1, den_grid); for(i=0;i<m;i++) * fwrite(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid); */ /* Read the data with the number of cells in each sphere */ den_grid = fopen("Spheres.dat", "rb"); if (den_grid == NULL) { printf("Unable to open spheres.dat\n"); exit(0); } fread(&m, sizeof(int), 1, den_grid); sphere = (int *)malloc(m * sizeof(int)); for (i = 0; i < m; i++) fread(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid); printf("We have %ld halos\n", nh); } /********************************/ /* Displacing the particles */ /********************************/ printf("\nDisplacing the particles using 1LPT!\n"); /* * Define the arrays to store the final position, velocity and mass of * each halo */ float **velh, **posh, *Massh; if (out_halos != 0) { gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc(gsl_rng_taus); gsl_rng_set(rng_ptr, seed); Massh = (float 
*)malloc(nh * sizeof(float)); velh = (float **)malloc(nh * sizeof(float *)); posh = (float **)malloc(nh * sizeof(float *)); for (i = 0; i < nh; i++) { velh[i] = (float *)malloc(3 * sizeof(float)); posh[i] = (float *)malloc(3 * sizeof(float)); for (j = 0; j < 3; j++) { posh[i][j] = 0.0; velh[i][j] = 0.0; } cont = Next_Count(sphere, Ncells, halos[i].cont); den_tmp = gsl_spline_eval(spline_I, halos[i].cont * Mcell, acc_I) + (gsl_spline_eval(spline_I, sphere[cont] * Mcell, acc_I) - gsl_spline_eval(spline_I, halos[i].cont * Mcell, acc_I)) * gsl_rng_uniform(rng_ptr); Massh[i] = gsl_spline_eval(spline_InvI, den_tmp, acc_InvI); } free(sphere); } /* Read the displacement field */ if (DISP_CAT == 0) { /* Open the output file for the displacement field */ printf("Reading the displacement field\n"); disp_cat = fopen(dispfile, "rb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fread(&nx, sizeof(int), 1, disp_cat); fread(&ny, sizeof(int), 1, disp_cat); fread(&nz, sizeof(int), 1, disp_cat); fread(&Lc, sizeof(float), 1, disp_cat); /* Read the displacement and add to each halo */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; if (DO_2LPT == 0) { fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += Growth * kx; posh[tmp][1] += Growth * ky; posh[tmp][2] += Growth * kz; velh[tmp][0] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } else { fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); fread(&factx, sizeof(float), 1, disp_cat); fread(&facty, sizeof(float), 1, disp_cat); fread(&factz, 
sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += Growth * (kx - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * factx); posh[tmp][1] += Growth * (ky - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * facty); posh[tmp][2] += Growth * (kz - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * factz); velh[tmp][0] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * factx); velh[tmp][1] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * facty); velh[tmp][2] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * factz); } } } fclose(disp_cat); } /* Compute the displacement field */ if (DISP_CAT == 1) { /* Define the arrays with the displacement field used in 2LPT */ float *S1, *S2, *S3; if (DO_2LPT == 1) { S1 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); S2 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); S3 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); } /* Alloc deltak */ if (DEN_GRID == 0) { deltak = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak, "deltak") } /* Redefine the FFTW3 plan to compute the displacements */ fftwf_destroy_plan(p1); p1 = NULL; p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak, delta, FFTW_ESTIMATE); /* Divide the fourier space density by the green's function */ for (i = 0; i < nx; i++) { if (2 * i < nx) kx = i * klx; else kx = (i - nx) * klx; factx = 1.0 / 90.0 * (2.0 * cos(3.0 * kx * Lc) - 27.0 * cos(2.0 * kx * Lc) + 270.0 * cos(kx * Lc) - 245.0) / (Lc * Lc); for (j = 0; j < ny; j++) { if (2 * j < ny) ky = j * kly; else ky 
= (j - ny) * kly; facty = 1.0 / 90.0 * (2.0 * cos(3.0 * ky * Lc) - 27.0 * cos(2.0 * ky * Lc) + 270.0 * cos(ky * Lc) - 245.0) / (Lc * Lc); for (k = 0; k < nz2; k++) { kz = k * klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; factz = 1.0 / 90.0 * (2.0 * cos(3.0 * kz * Lc) - 27.0 * cos(2.0 * kz * Lc) + 270.0 * cos(kz * Lc) - 245.0) / (Lc * Lc); size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; if (kx != 0.0 || ky != 0.0 || kz != 0.0) { fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0] / fact; deltak[ind][1] = deltak[ind][1] / fact; } else { deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /* Compute the potential at first order */ fftwf_execute(p1); /* * Compute the first order displacements and update the position and * velocity of each halo */ if (DO_2LPT == 1) { for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; S1[ind] = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); S2[ind] = -(1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); S3[ind] = -(1.0 * delta[(size_t) (i * ny 
+ j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += S1[ind]; posh[tmp][1] += S2[ind]; posh[tmp][2] += S3[ind]; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S1[ind]; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S2[ind]; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S3[ind]; } } } else { /* Open the output file for the displacement field */ if (out_inter == 2 || out_inter == 3) { printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; /* save the displacement field */ if (out_inter == 2 || out_inter == 3) { kx = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = -(1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * 
(size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = -(1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } /* Do not save the displacements */ else if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = -(1.0 * 
delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = -(1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } if (out_inter == 2 || out_inter == 3) fclose(disp_cat); } if (DO_2LPT == 1) { printf("Displacing the particles using 2LPT!\n"); /* Evaluating the second order contribution */ p2 = fftwf_plan_dft_r2c_3d(nx, ny, nz, delta, deltak, FFTW_ESTIMATE); /* Compute the second order "density" */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { phixx = (1.0 * S1[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * S1[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * S1[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * S1[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * S1[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * 
S1[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phixy = (1.0 * S1[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * S1[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * S1[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * S1[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * S1[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * S1[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phixz = (1.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S1[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S1[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); phiyy = (1.0 * S2[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * S2[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * S2[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * S2[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * S2[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * S2[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phiyz = (1.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S2[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S2[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); phizz = (1.0 * S3[(size_t) (i * ny + j) 
* (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S3[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S3[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) k] = 1.0 * (phixx * phiyy + phixx * phizz + phiyy * phizz - pow(phixy, 2.0) - pow(phixz, 2.0) - pow(phiyz, 2.0)); } /* Go to fourier space to solve the posson equation */ fftwf_execute(p2); /* Divide the fourier space density by the green's function */ for (i = 0; i < nx; i++) { if (2 * i < nx) kx = i * klx; else kx = (i - nx) * klx; factx = 1.0 / 90.0 * (2.0 * cos(3.0 * kx * Lc) - 27.0 * cos(2.0 * kx * Lc) + 270.0 * cos(kx * Lc) - 245.0) / (Lc * Lc); for (j = 0; j < ny; j++) { if (2 * j < ny) ky = j * kly; else ky = (j - ny) * kly; facty = 1.0 / 90.0 * (2.0 * cos(3.0 * ky * Lc) - 27.0 * cos(2.0 * ky * Lc) + 270.0 * cos(ky * Lc) - 245.0) / (Lc * Lc); for (k = 0; k < nz2; k++) { kz = k * klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; factz = 1.0 / 90.0 * (2.0 * cos(3.0 * kz * Lc) - 27.0 * cos(2.0 * kz * Lc) + 270.0 * cos(kz * Lc) - 245.0) / (Lc * Lc); size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; if (kx != 0.0 || ky != 0.0 || kz != 0.0) { fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0] / fact * Normk; deltak[ind][1] = deltak[ind][1] / fact * Normk; } else { deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /* Come back to real space */ fftwf_execute(p1); /* Open the output file for the displacement field */ if (out_inter == 2 || out_inter == 3) { printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, 
disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } /* Compute the second order displacements and velocities */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; /* save the displacement field */ if (out_inter == 2 || out_inter == 3) { kx = (1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = (1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = (1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); fwrite(&S1[ind], sizeof(float), 1, disp_cat); fwrite(&S2[ind], sizeof(float), 1, disp_cat); fwrite(&S3[ind], sizeof(float), 1, disp_cat); 
fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kx; ky = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * ky; kz = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kx; //velh[tmp][1] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * ky; //velh[tmp][2] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kz; } } /* Do not save the displacements */ else if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = (1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = (1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = (1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + 
cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); kx = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kx; ky = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * ky; kz = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kx; //velh[tmp][1] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * ky; //velh[tmp][2] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kz; } } if (out_inter == 2 || out_inter == 3) fclose(disp_cat); /* Free the FFTW memory */ fftwf_destroy_plan(p2); free(S1); free(S2); free(S3); } fftwf_destroy_plan(p1); fftwf_free(deltak); } fftwf_free(delta); if (out_collapse == 0 && out_halos != 0) free(flag); /* Compute the final position and velocity of the halos */ if (out_halos != 0) { for (i = 0; i < nh; i++) { posh[i][0] = cysumf(halos[i].x[0] * Lc + Lc / 2.0, posh[i][0] / halos[i].cont, Lx); posh[i][1] = cysumf(halos[i].x[1] * Lc + Lc / 2.0, posh[i][1] / halos[i].cont, Ly); posh[i][2] = cysumf(halos[i].x[2] * Lc + Lc / 2.0, posh[i][2] / halos[i].cont, Lz); velh[i][0] = velh[i][0] / halos[i].cont; velh[i][1] = velh[i][1] / halos[i].cont; velh[i][2] = velh[i][2] / halos[i].cont; } } /* Saving the positions and velocities in real space */ if (out_halos == 1) { printf("Saving the halos\n"); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { printf("Unable to open %s\n", halofile); exit(0); } fprintf(halo_cat, "%ld\n", nh); for (i = 0; i < nh; i++) { fprintf(halo_cat, "%f %f %f %f %f %f %e %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], Massh[i], halos[i].cont); } fclose(halo_cat); } /* Putting galaxies in the halos */ if (DO_HOD == 1) { printf("Saving the galaxies\n"); sprintf(halofile, "%s_gals.dat", argv[14]); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { 
printf("Unable to open %s\n", halofile); exit(0); } cont = 0; for (i = 0; i < nh; i++) { /* Compute the number of central and satellite galaxies */ if (Ncentral(Massh[i], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double)Nsatellite(Massh[i], logM0, logM1, alpha)); Ngals = Ncen + Nsat; if (Ngals == 0) continue; /* Save the central galaxy */ if (Ncen == 1) { fprintf(halo_cat, "%f %f %f %f %f %f %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], i); cont++; } /* Put the satellite galaxies following the NFW profile */ if (Nsat > 0) { Rv = pow(3.0 * Massh[i] / (4.0 * M_PI * Dv * rhom), 1.0 / 3.0); C = f_c(Massh[i], (float)Mstar, z); A = log(1.0 + C) - C / (1.0 + C); } for (j = 0; j < Nsat; j++) { phi = 2.0 * M_PI * gsl_rng_uniform(rng_ptr); theta = M_PI * gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(posh[i][0], r * sin(theta) * cos(phi), Lx); ky = cysumf(posh[i][1], r * sin(theta) * sin(phi), Ly); kz = cysumf(posh[i][2], r * cos(theta), Lz); fprintf(halo_cat, "%f %f %f ", kx, ky, kz); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fprintf(halo_cat, "%f %f %f %d\n", kx, ky, kz, i); cont++; } } fclose(halo_cat); n_bar = cont / (Lx * Ly * Lz); printf("n_bar = %f\n", n_bar); } /********************************/ /* Put the halos in the lightcone */ /********************************/ if (out_halos == 2 || out_halos == 3) { printf("\nPutting the halos in the light cone!\n"); printf("The code is using (%d, %d, %d) replicas to construct the light cone.\n", Nrep_x, Nrep_y, Nrep_z); printf("This snapshot is in the range %f - %f [Mpc/h] with theta_min = %f.\n", dist_min, dist_max, theta_min); /* Open the light cone file */ light_cat = fopen(lightfile, "wb"); if (light_cat == NULL) { printf("Unable to open %s\n", lightfile); exit(0); } cont = 0; fwrite(&cont, sizeof(long), 1, light_cat); /* Run over all the halos and save then in the light cone file */ for (l = 0; 
l < nh; l++) { for (i = -Nrep_x; i <= Nrep_x; i++) for (j = -Nrep_y; j <= Nrep_y; j++) for (k = -Nrep_z; k <= Nrep_z; k++) { /* Compute the distance for this replic */ Pos[0] = posh[l][0] + Lx * i - Pobs[0]; Pos[1] = posh[l][1] + Ly * j - Pobs[1]; Pos[2] = posh[l][2] + Lz * k - Pobs[2]; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (out_halos == 3) { /* Compute the distance in redshift space */ vr = 0.0; for (m = 0; m < 3; m++) vr += velh[l][m] * Pos[m]; vr = vr / dist; for (m = 0; m < 3; m++) Pos[m] = Pos[m] + vr / Hz * (1.0 + redshift) * Pos[m] / dist; dist = dist + vr / Hz * (1.0 + redshift); } if (dist <= dist_min || dist > dist_max) continue; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (cost < cos_min) continue; /* Save the information about this halo */ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); fwrite(&Massh[l], sizeof(float), 1, light_cat); cont++; /* Put galaxies in this halo (one type) */ if (DO_HOD == 1) { /* * Compute the number of central and satellite * galaxies */ if (Ncentral(Massh[l], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double)Nsatellite(Massh[l], logM0, logM1, alpha)); Ngals = Ncen + Nsat; /* Save the total number of galaxies */ fwrite(&Ngals, sizeof(int), 1, light_cat); /* Save the central galaxy */ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); /* * Put the satellite galaxies following the NFW * 
profile */ if (Nsat > 0) { Rv = pow(3.0 * Massh[l] / (4.0 * M_PI * Dv * rhom), 1.0 / 3.0); C = f_c(Massh[l], (float)Mstar, z); A = log(1.0 + C) - C / (1.0 + C); } for (m = 0; m < Nsat; m++) { phi = 2.0 * M_PI * gsl_rng_uniform(rng_ptr); theta = M_PI * gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(Pos[0], r * sin(theta) * cos(phi), Lx); ky = cysumf(Pos[1], r * sin(theta) * sin(phi), Ly); kz = cysumf(Pos[2], r * cos(theta), Lz); fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); } } } } rewind(light_cat); fwrite(&cont, sizeof(long), 1, light_cat); fclose(light_cat); if (out_collapse == 1) { /* * Open the file to save the information about the collapsed * particles */ collapse_cat = fopen(collapsefile, "wb"); if (collapse_cat == NULL) { printf("Unable to open %s\n", collapsefile); exit(0); } /* Save the information about the colapsed particles */ int a, b, c; cont = 0; fwrite(&cont, sizeof(long), 1, collapse_cat); for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; for (a = -Nrep_x; a <= Nrep_x; a++) for (b = -Nrep_y; b <= Nrep_y; b++) for (c = -Nrep_z; c <= Nrep_z; c++) { /* Compute the distance for this replic */ Pos[0] = i * Lc + Lc / 2.0 + Lx * a; Pos[1] = j * Lc + Lc / 2.0 + Ly * b; Pos[2] = k * Lc + Lc / 2.0 + Lz * c; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (dist <= dist_min || dist > dist_max) continue; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (cost < cos_min) continue; tmp = flag[ind]; cont++; fwrite(&ind, sizeof(size_t), 1, collapse_cat); fwrite(&tmp, sizeof(int), 1, collapse_cat); 
fwrite(&redshift, sizeof(float), 1, collapse_cat); } } rewind(collapse_cat); fwrite(&cont, sizeof(long), 1, collapse_cat); fclose(collapse_cat); free(flag); } } /*******************/ /* Free the memory */ /*******************/ gsl_spline_free(spline_I); gsl_spline_free(spline_InvI); gsl_interp_accel_free(acc_I); gsl_interp_accel_free(acc_InvI); if (out_halos != 0) { free(Massh); for (i = 0; i < nh; i++) { free(velh[i]); free(posh[i]); } /* Free the rest */ free(velh); free(posh); free(halos); gsl_rng_free(rng_ptr); } return 0; }
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <fftw3.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include "fftlog.h"

/* Abort the current function (returning 0) when an allocation failed.
 * NOTE(review): not wrapped in do{...}while(0), so using it as the body of an
 * un-braced if/else would bind surprisingly — keep invocations at statement
 * position as done in this file. */
#define check_memory(p, name) if(p == NULL){printf("Problems to alloc %s.\n", name); return 0;}
#define Lc_MAX 1.0e+2   /* Largest cell size [Mpc/h]; above this, arg2 is read as a mass */
#define Mc_MIN 1.0e+5   /* Smallest cell mass [M_sun/h]; below this, arg2 is rejected */
#define M_max 6.0e+15   /* Largest halo mass considered [M_sun/h] */

/* Structure for the peaks in the density field */
typedef struct Halos_centers {
	int x[3];	/* Grid index (i, j, k) of the halo center cell */
	float den;	/* Density of the halo's central cell */
} PEAKS;

/* Structure for the final halos */
typedef struct Halos {
	int x[3];	/* Grid index of the central cell of the halo */
	int cont;	/* Number of cells in the halo */
} HALOS;

/* Collapse barrier used for the halo definition, evaluated at variance S.
 * barrier == 0: static (Press-Schechter) barrier, returns dc.
 * barrier == 1: ellipsoidal (Sheth-Tormen-like) barrier with parameters
 *               a, b and alpha.
 * NOTE(review): if barrier is neither 0 nor 1, resp is returned
 * uninitialized (undefined behavior). Callers validate BARRIER before use,
 * but a defensive default would be safer. */
float
Barrier(float S, float dc, char barrier, float a, float b, float alpha)
{
	float resp;

	/* The Press-Schechter barrier */
	if (barrier == 0)
		resp = dc;

	/* The Sheth-Tormen barrier */
	else if (barrier == 1)
		resp = sqrt(a) * dc * (1.0 + b * pow(S / (a * dc * dc), alpha));

	return resp;
}

/* Partition step of quicksort over the peaks array, between indices l and r.
 * Peaks are ordered by DESCENDING density (a[l].den is the pivot); the pivot's
 * grid indices x[3] are swapped along with the densities. Returns the final
 * pivot position. */
long int
partition_peaks(PEAKS a[], long l, long r)
{
	long i, j, k;
	PEAKS pivot, t;

	pivot.den = a[l].den;
	for (k = 0; k < 3; k++)
		pivot.x[k] = a[l].x[k];
	i = l;
	j = r + 1;

	while (1) {
		/* Advance i past entries denser than (or equal to) the pivot */
		do
			++i;
		while (a[i].den >= pivot.den && i < r);
		/* Retreat j past entries less dense than the pivot */
		do
			--j;
		while (a[j].den < pivot.den);
		if (i >= j)
			break;
		/* Swap the out-of-place pair (density and grid index together) */
		t.den = a[i].den;
		a[i].den = a[j].den;
		a[j].den = t.den;
		for (k = 0; k < 3; k++) {
			t.x[k] = a[i].x[k];
			a[i].x[k] = a[j].x[k];
			a[j].x[k] = t.x[k];
		}
	}
	/* Place the pivot at its final position j */
	t.den = a[l].den;
	a[l].den = a[j].den;
	a[j].den = t.den;
	for (k = 0; k < 3; k++) {
		t.x[k] = a[l].x[k];
		a[l].x[k] = a[j].x[k];
		a[j].x[k] = t.x[k];
	}
	return j;
}

/* The quicksort algorithm to sort the peaks list (descending density).
 * Recursive; sorts a[l..r] inclusive. */
void
quickSort_peaks(PEAKS a[], long l, long r)
{
	long j;

	if (l < r) {
		//divide and conquer
		j = partition_peaks(a, l, r);
		quickSort_peaks(a, l, j - 1);
		quickSort_peaks(a, j + 1, r);
	}
}

/* Squared grid distance between two cells separated by (i, j, k) cells.
 * (Despite the name, no square root is taken.) */
long int
dist2(long int i, long int j, long int k)
{
	long int resp;

	resp = i * i + j * j + k * k;

	return resp;
}

/* Cyclic (periodic) sum for floats: x + y wrapped into [0, L).
 * NOTE(review): a single wrap is applied, so |y| is assumed < L. */
float
cysumf(float x, float y, float L)
{
	float resp;

	resp = x + y;
	if (resp >= L)
		resp -= L;
	if (resp < 0)
		resp += L;

	return resp;
}

/* Cyclic (periodic) sum for grid indices: i + j wrapped into [0, nd).
 * NOTE(review): a single wrap is applied, so |j| is assumed < nd. */
int
cysum(int i, int j, int nd)
{
	int resp;

	resp = i + j;
	if (resp >= nd)
		resp -= nd;
	if (resp < 0)
		resp += nd;

	return resp;
}

/* Fourier transform of the spherical top-hat window of radius R,
 * W(kR) = 3/(kR)^2 [sin(kR)/(kR) - cos(kR)].
 * NOTE(review): diverges for k*R == 0; callers must keep k > 0. */
double
W(double k, double R)
{
	double resp;

	resp = 3.0 / (pow(k * R, 2)) * (sin(k * R) / (k * R) - cos(k * R));

	return resp;
}

/* Linear matter variance sigma^2(R): trapezoidal integral of
 * P(k) k^2 W(kR)^2 / (2 pi^2) over the tabulated (k, P) arrays of size Nk.
 * NOTE(review): despite the original comment ("square root of matter
 * variance"), this returns the VARIANCE; the caller applies sqrt() to the
 * result. */
double
calc_sigma(double *k, double *P, int Nk, double R)
{
	int i;
	double resp;

	resp = 0.0;
	for (i = 0; i < Nk - 1; i++)
		resp += (k[i + 1] - k[i]) / 2.0 * (P[i] * pow(k[i] * W(k[i], R), 2) + P[i + 1] * pow(k[i + 1] * W(k[i + 1], R), 2));

	return resp / (2.0 * M_PI * M_PI);
}

/* Halo multiplicity function f(sigma) for a given sigma.
 * model == 0: Press-Schechter; model == 1: Tinker-style fit (the comment
 * below says Delta = 300 — the hard-coded B, d, e, f, g coefficients encode
 * that choice).
 * NOTE(review): if model is neither 0 nor 1, resp is returned uninitialized
 * (undefined behavior). */
double
fh(double sigma, int model, double dc)
{
	double resp, nu;
	double B, d, e, f, g;

	//Press - Schechter
	if (model == 0) {
		nu = dc / sigma;
		resp = sqrt(2.0 / M_PI) * nu * exp(-nu * nu / 2.0);
	}
	//Tinker Delta = 300
	else if (model == 1) {
		B = 0.466;
		d = 2.06;
		e = 0.99;
		f = 0.48;
		g = 1.310;
		resp = B * (pow(sigma / e, -d) + pow(sigma, -f)) * exp(-g / (sigma * sigma));
	}
	return resp;
}

/* Find the index just past the first entry of spheres[] equal to count
 * (i.e. resp = i + 1 for the first match in the first Ncells entries).
 * NOTE(review): if no entry matches, resp is returned uninitialized
 * (undefined behavior) — callers must guarantee count is present. */
int
Next_Count(int *spheres, int Ncells, int count)
{
	int i, resp;

	for (i = 0; i < Ncells; i++)
		if (spheres[i] == count) {
			resp = i + 1;
			break;
		}

	return resp;
}

/* Halo concentration c(Mv, z) = 9/(1+z) * (Mv/Mstar)^(-0.13),
 * with Mstar the characteristic non-linear mass supplied by the caller. */
float
f_c(float Mv, float Mstar, float z)
{
	float resp;

	resp = 9.0 / (1.0 + z) * pow(Mv / Mstar, -0.13);

	return resp;
}

/* Generate a random radius from 0 to rv following the NFW profile with
 * concentration c and normalization A = ln(1+c) - c/(1+c), by inverting the
 * enclosed-mass CDF (the Newton iteration continues below this point).
 * NOTE(review): a fresh GSL generator is allocated and seeded on EVERY call;
 * callers passing the same seed get correlated draws, and the per-call
 * alloc/free is costly in the satellite loop — consider passing the caller's
 * rng instead. */
float
Generate_NFW(float rv, float c, float A, int seed)
{
	float Int, rs, r, rtmp;
	gsl_rng *rng_ptr;

	rng_ptr = gsl_rng_alloc(gsl_rng_taus);
	gsl_rng_set(rng_ptr, seed);
	Int = gsl_rng_uniform(rng_ptr);	/* Uniform deviate = target CDF value */
	rs = rv / c;
	r =
gsl_rng_uniform(rng_ptr); rtmp = r + 1.0; while (fabs(r - rtmp) > 0.001) { rtmp = r; r = r - ((log(1.0 + r * c) - r * c / (1.0 + r * c) - A * Int) * pow(1.0 + r * c, 2)) / (c * (2.0 * r * c + r * r * c * c)); } gsl_rng_free(rng_ptr); return r * rv; } /* Mean value of central galaxies */ float Ncentral(float M, float logMmin, float siglogM) { float resp; resp = 0.5 * (1.0 + erf((log10(M) - logMmin) / siglogM)); return resp; } /* Mean value of satellite galaxies */ float Nsatellite(float M, float logM0, float logM1, float alpha) { float resp; resp = pow((M - pow(10.0, logM0)) / pow(10.0, logM1), alpha); return resp; } int main(int argc, char *argv[]) { FILE *power, *den_grid, *halo_cat, *disp_cat, *light_cat, *collapse_cat; char powerfile[100], denfile[100], halofile[100], dispfile[100], lightfile[100], collapsefile[100]; char DO_2LPT, BARRIER, out_inter, out_halos, out_collapse, DEN_GRID, DISP_CAT, DO_HOD; int i, j, k, nx, ny, nz, nz2, Nmin, N_cores, Nk, Nr, Ncells, seed, cont_tmp, grows, grows_tmp, tmp, nmin, m, m_tmp, nsnap; long np, cont, nh, l; float Lc, Om0, redshift, Growth, dc, EB_a, EB_b, EB_alpha, rhoc, Hz, Omz, rhomz, Mtot, Mcell, Lx, Ly, Lz, klx, kly, klz, Normx, Normk, Dv, kx, ky, kz, kmod, *Sig_grid, sigtmp, std, den, den_tmp, factx, facty, factz, fact, phixx, phixy, phixz, phiyy, phiyz, phizz, Pobs[3], LoS[3], dist_min, dist_max, theta_min, cos_min; double *K, *P, *R, *M, *Sig, Rmin, Rmax, *R_xi, *Xi; fftwf_plan p1, p2; fftwf_complex *deltak, *deltak_tmp; float *delta; int *flag, *sphere; PEAKS *peaks, *tmpp; HALOS *halos; if (argc != 35) { printf("\nWrong number of arguments.\n"); printf("arg1: Name of the power spectrum file.\n"); printf("arg2: Size (in Mpc/h) or mass (in M_odot/h) of each cell.\n"); printf("arg3-5: Number of cells along each direction.\n"); printf("arg6: Some seed for the random number generator.\n"); printf("arg7: Use the 2LPT to move the halos? 
Yes (1) or No (0).\n"); printf("arg8: The Value of Omega_m today.\n"); printf("arg9: The readshift z.\n"); printf("arg10: The rate between the growth function at the final resdshit and at the redshift of the input power spectrum.\n"); printf("arg11: The value of critical density delta _{c}. Put 0 to use the fit.\n"); printf("arg12: The minimum number of partiles in a halo of the final catalogue.\n"); printf("arg13: The number of cores to use in the parallel parts.\n"); printf("arg14: Prefix for the outputs.\n"); printf("arg15: Which barrier would you like to use to find the halos?\n\tThe statical barrier (SB) (0);\n\tThe ellipsoidal barrier (EB) (1).\n"); printf("arg16: Which intermediate results would you like to save?:\n\tNo one (0);\n\tThe gaussian density grid (1);\n\tThe particles displaced with LPT (2);\n\tBoth (3).\n"); printf("arg17: How do you want the final halo catalogue?\n\tNo halo catalogue (0);\n\tThe positions and velocities in the real space (1);\n\tThe positions and velocities in the real space light cone (2);\n\tThe positions and velocities in redshift space light cone(3).\n"); printf("arg18-20: The three parameters for the ellipsoidal barrier: a, b and alpha.\n"); printf("arg21: Read the density grid (0) or compute it (1)?\n"); printf("arg22: Read the displacement field (0) or compute it (1)?\n"); printf("arg23-25: Position of the observer in units of the box size.\n"); printf("arg26-28: Direction of the line of sight.\n"); printf("arg29-30: Minimum and maximum comoving distance of the halos in this snapshot in the light cone.\n"); printf("arg31: Angular aperture of the light cone in units of pi.\n"); printf("arg32: Save the information about the collapsed particles in the light cone? 
Yes (1) or No (0).\n"); printf("arg33: Populate the halos with a HOD?\n\tNo (0);\n\tYes, with a single type of galaxy (1)\n\tYes, with multiple types of galaxies(2).\n"); printf("arg34: Number of this snapshot.\n"); exit(0); } /* Get the name of all files */ sprintf(powerfile, "%s", argv[1]); sprintf(denfile, "%s_den.dat", argv[14]); sprintf(halofile, "%s_halos.dat", argv[14]); sprintf(dispfile, "%s_disp.dat", argv[14]); /* * Parameters with specifications of the box and options for this * simulation */ Lc = atof(argv[2]); //Size or mass of each cell nx = atoi(argv[3]); //Number of cells along the x - direction ny = atoi(argv[4]); //Number of cells along the y - direction nz = atoi(argv[5]); //Number of cells along the z - direction seed = atoi(argv[6]); //Seed for the random generator(same seed gives the same final catalogue) DO_2LPT = (char)atoi(argv[7]); //Parameter with the information about the use(or not) of second order lagrangian perturbation theory Nmin = atoi(argv[12]); //Number of particles in the smaller final halo N_cores = atoi(argv[13]); //Number of cores used by openmp in the parallel parts BARRIER = (char)atoi(argv[15]); //Parameter with the information about the utilization(or not) of the EB out_inter = (char)atoi(argv[16]); //Parameter with the information about which intermediate results must be output out_halos = (char)atoi(argv[17]); //Parameter with the information about what to save in the final halo catalogue out_collapse = (char)atoi(argv[32]); //Parameter with the information about the collapsed particles in the light cone DEN_GRID = (char)atoi(argv[21]); //Compute a new density field(1) or just read it from a file(0) ? DISP_CAT = (char)atoi(argv[22]); //Compute the displacement field(1) or just read it from a file(0) ? DO_HOD = (char)atoi(argv[33]); //Populate the halos with no galaxies(0), one type of galaxy(1) or multiple types(2) ? 
/* Some physical parametrs used in this simulation */ Om0 = atof(argv[8]); //Omega_m value today(z = 0) redshift = atof(argv[9]); //Redshift of the final catalogues Growth = atof(argv[10]); //Ratio between the growth function at the final redshift and the redshift of the inpur power spectrum dc = atof(argv[11]); //Value of the critical density for the halo formation linearly extrapoleted using linear theory to the redshift of the final catalogues /* Parameters for the EB */ EB_a = atof(argv[18]); //Parameter a of the EB EB_b = atof(argv[19]); //Parameter b of the EB EB_alpha = atof(argv[20]); //Parameter alpha of the EB /* Parameters for the construction of the light cone */ Pobs[0] = atof(argv[23]); //Position x of the observer in units of the box size Pobs[1] = atof(argv[24]); //Position y of the observer in units of the box size Pobs[2] = atof(argv[25]); //Position z of the observer in units of the box size LoS[0] = atof(argv[26]); //Component x of the direction of the line of sight LoS[1] = atof(argv[27]); //Component y of the direction of the line of sight LoS[2] = atof(argv[28]); //Component z of the direction of the line of sight /* Normalize the LoS vector */ kmod = 0.0; for (i = 0; i < 3; i++) kmod += LoS[i] * LoS[i]; for (i = 0; i < 3; i++) LoS[i] = LoS[i] / sqrt(kmod); dist_min = atof(argv[29]); //Minimum comoving distance of this slice dist_max = atof(argv[30]); //Maximum comoving distance of this slice theta_min = atof(argv[31]) * M_PI; //Minimum angle theta cos_min = cos(theta_min); //Cossine of the minimum angle theta nsnap = atoi(argv[34]); //Number of this snapshot sprintf(lightfile, "%s_%d_LightCone.dat", argv[14], nsnap); sprintf(collapsefile, "%s_%d_Collapse.dat", argv[14], nsnap); /* Some derived parameters used in this simulation */ rhoc = 2.775e+11; //Critical density in unitis of M_odot / Mpc * h ^ 2 Hz = 100.0 * sqrt(Om0 * pow(1.0 + redshift, 3.0) + (1.0 - Om0)); //Hubble constant at the final redshift Omz = Om0 * pow(1.0 + redshift, 3.0) / 
(Om0 * pow(1.0 + redshift, 3.0) + (1.0 - Om0)); //Matter contrast density at the final redshift rhomz = Om0 * rhoc; //Matter density at the final redshift Dv = (18 * M_PI * M_PI + 82.0 * (Omz - 1.0) - 39.0 * pow(Omz - 1.0, 2.0)) / Omz; //Overdensity used to put galaxies in the halos if (Lc < Lc_MAX) //If the size of each cell was given compute the mass of each cell Mcell = rhomz * pow(Lc, 3.0); else if (Lc > Mc_MIN) { //If the mass of each cell was given compute the size of each cell Mcell = Lc; Lc = pow(Mcell / rhomz, 1.0 / 3.0); } else { //Notify an unexpected behavior and exit printf("A cell larger than %f [Mpc/h] or with a mass smaller than %e [M_odot/h] is not expected. Please, change this value or change the definition of Lc_MAX and Mc_MIN in the code.\n", Lc_MAX, Mc_MIN); exit(0); } Lx = Lc * nx; //Compute the size of the box along the x - direction Ly = Lc * ny; //Compute the size of the box along the y - direction Lz = Lc * nz; //Compute the size of the box along the z - direction Mtot = rhomz * Lx * Ly * Lz; //Compute the total mass in the box klx = 2.0 * M_PI / Lx; //Compute the fundamental frequency in the x - direction kly = 2.0 * M_PI / Ly; //Compute the fundamental frequency in the y - direction klz = 2.0 * M_PI / Lz; //Compute the fundamental frequency in the z - direction Normx = 1.0 / sqrt(Lx * Ly * Lz); //Compute the normalization needed when aplyed the FFTW3 from k to x space Normk = sqrt(Lx * Ly * Lz) / (nx * ny * nz); //Compute the normalization needed when aplyed the FFTW3 from x to k space nz2 = nz / 2 + 1; //Quantity used to alloc the complex arrays used in the FFTW3 nmin = nx; //Determine the smaller direction if (nmin > ny) nmin = ny; if (nmin > nz) nmin = nz; /* * Compute the number of repetitions of this box to construct the light * cone */ float Pos[3], dist, cost, vr, Mass; int Nrep_x, Nrep_y, Nrep_z; if (out_halos == 2 || out_halos == 3) { Nrep_x = floor(dist_max / Lx) + 1; Nrep_y = floor(dist_max / Ly) + 1; Nrep_z = floor(dist_max / 
Lz) + 1; } /* Parameters of the HOD model */ int Ngals, Ncen, Nsat; float r, phi, theta, Rv, C, A; float logMmin, siglogM, logM0, logM1, alpha; logMmin = 12.44005264; siglogM = 0.79560376; logM0 = 11.98154109; logM1 = 12.99600074; alpha = 1.13717828; /* Check some inputs before to start */ if (out_inter == 0 && out_halos == 0) { printf("You need to choose something to output! arg16, arg17 and/or arg18 must be >0!\n"); exit(0); } if (nx < 0 || ny < 0 || nz < 0) { printf("You are trying to use n = (%d, %d, %d) and it is not possible!\n", nx, ny, nz); exit(0); } if (DO_2LPT < 0 || DO_2LPT > 1) { printf("You are trying to use DO_2LPT = %d and it is not possible! Setting DO_2LPT = 0.\n", DO_2LPT); DO_2LPT = 0; } if (Growth <= 0.0) { printf("You gave a value of the ratio between the growths of %f and it is not physical!\n", Growth); exit(0); } if (Nmin < 0) { printf("You gave a negative number for the number of particles in the smaller halo (%d). Settin it in 1.\n", Nmin); Nmin = 1; } if (N_cores < 0) { printf("You gave a negative number for the number of cores (%d). Settin it in 1.\n", N_cores); N_cores = 1; } if (BARRIER != 0 && BARRIER != 1) { printf("You need to chose a valid barrier for the void detection! Your choice were %d.\n", BARRIER); exit(0); } if (Om0 > 1.0 || Om0 < 0.0) { printf("Your Omega _{m} = %f! Put some valid value between 0.0 and 1.0.\n", Om0); exit(0); } if (dc < 0.0) { printf("Your delta_{c} = %f < 0. Using the fit.\n", dc); dc = 1.686 * pow(Omz, 0.0055); } if (dc == 0.0) dc = 1.686 * pow(Omz, 0.0055); if (out_halos > 1 && theta_min > 1.0) { printf("Theta min must be equal or smaller than 1! 
Setting it to 1.\n"); theta_min = 1.0; cos_min = -1.0; } if (out_halos > 1 && LoS[0] == 0.0 && LoS[1] == 0.0 && LoS[2] == 0.0) { printf("You must give a non vanishing vector for the direction of the line of sight!\n"); exit(0); } if (out_collapse == 1 && out_halos < 2) { printf("It is not possible to save the information about the collapsed particles without the creation of a light cone. Ignoring this parameter.\n"); out_collapse = 0; } printf("\nRunning the ExSHalos!\n\ Omega_m = %.3f, z = %.3f, Growth = %.3f, H = %.2f, d_c = %.3f and Delta_virial = %.1f\n\ L = (%.5f, %.5f, %.5f), N_cells = (%d, %d, %d), M_tot = %.5e, M_cell = %.5e and seed = %d.\n", Omz, redshift, Growth, Hz, dc, Dv, Lx, Ly, Lz, nx, ny, nz, Mtot, Mcell, seed); omp_set_num_threads(N_cores); //Set the number of cores used by the openmp /**************************************/ /* Constructing the density grids */ /**************************************/ printf("\nConstructing the density grid in real and fourier space!\n"); /* Opennning the power spectrum file */ power = fopen(powerfile, "r"); if (power == NULL) { printf("Unable to open %s\n", powerfile); exit(0); } /* Measuring the number of k's */ Nk = -1; while (!feof(power)) { fscanf(power, "%f %f", &kx, &ky); Nk++; } rewind(power); /* Reading the power spectrum */ K = (double *)malloc(Nk * sizeof(double)); check_memory(K, "K") P = (double *)malloc(Nk * sizeof(double)); check_memory(P, "P") for (i = 0; i < Nk; i++) { fscanf(power, "%lf %lf", &K[i], &P[i]); P[i] = pow((double)Growth, 2.0) * P[i]; } fclose(power); /* Evaluating the Sigma(R) */ Nr = Nk; R = (double *)malloc(Nr * sizeof(double)); check_memory(R, "R") M = (double *)malloc(Nr * sizeof(double)); check_memory(M, "M") Sig = (double *)malloc(Nr * sizeof(double)); check_memory(Sig, "Sig") Rmin = (double)pow(Mcell * 0.9 * 3.0 / (4.0 * M_PI * rhomz), 1.0 / 3.0); Rmax = (double)pow(M_max * 3.0 / (4.0 * M_PI * rhomz), 1.0 / 3.0); for (i = 0; i < Nr; i++) { R[i] = pow(10, log10(Rmin) + i * 
(log10(Rmax) - log10(Rmin)) / (Nr - 1)); M[i] = 4.0 / 3.0 * M_PI * (double)rhomz *pow(R[i], 3); } for (i = 0; i < Nr; i++) Sig[i] = sqrt(calc_sigma(K, P, Nk, R[i])); /* Interpolating the Sigma(M) */ gsl_interp_accel *acc = gsl_interp_accel_alloc(); gsl_spline *spline = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline, M, Sig, Nr); /* Evaluate the integral of the mass function */ double *Int; Int = (double *)malloc(Nr * sizeof(double)); check_memory(Int, "Int") Int[0] = 0.0; for (i = 1; i < Nr; i++) Int[i] = Int[i - 1] - (log(Sig[i]) - log(Sig[i - 1])) / 2.0 * (fh(Sig[i], 1, (double)dc) / pow(R[i], -3.0) + fh(Sig[i - 1], 1, (double)dc) / pow(R[i - 1], -3.0)); /* * Interpolate the integral of the mass function as function of mass and * its inverse */ gsl_interp_accel *acc_I = gsl_interp_accel_alloc(); gsl_interp_accel *acc_InvI = gsl_interp_accel_alloc(); gsl_spline *spline_I = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline *spline_InvI = gsl_spline_alloc(gsl_interp_cspline, Nr); gsl_spline_init(spline_I, M, Int, Nr); gsl_spline_init(spline_InvI, Int, M, Nr); free(Int); /* Compute the Sigma as function of the number of cells in the halo */ Ncells = floor(M_max / Mcell); Sig_grid = (float *)malloc(Ncells * sizeof(float)); check_memory(Sig_grid, "Sig_grid") Sig_grid[0] = 1e+30; for (i = 1; i < Ncells; i++) Sig_grid[i] = pow(gsl_spline_eval(spline, i * Mcell, acc), 2.0); gsl_spline_free(spline); gsl_interp_accel_free(acc); free(R); free(M); free(Sig); /* Read the density grid */ if (DEN_GRID == 0) { delta = (float *)fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); check_memory(delta, "delta") printf("Reading the density grid\n"); den_grid = fopen(denfile, "rb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fread(&nx, sizeof(int), 1, den_grid); fread(&ny, sizeof(int), 1, den_grid); fread(&nz, sizeof(int), 1, den_grid); fread(&Lc, sizeof(float), 1, den_grid); for (i = 0; i < nx; i++) for (j = 0; j < 
ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; fread(&delta[ind], sizeof(float), 1, den_grid); delta[ind] = Growth * delta[ind]; } fclose(den_grid); } /* Construct the density grid */ if (DEN_GRID == 1) { /* Compute the Power spectrum in the box */ R_xi = (double *)malloc(Nk * sizeof(double)); check_memory(R_xi, "R_xi") Xi = (double *)malloc(Nk * sizeof(double)); check_memory(Xi, "Xi") pk2xi(Nk, K, P, R_xi, Xi); for (i = 0; i < Nk; i++) if (R_xi[i] > (double)pow(Lx * Ly * Lz, 1.0 / 3.0) / 2.0) Xi[i] = 0.0; xi2pk(Nk, R_xi, Xi, K, P); free(R_xi); free(Xi); /* Interpolate the power spectrum */ acc = gsl_interp_accel_alloc(); spline = gsl_spline_alloc(gsl_interp_cspline, Nk); gsl_spline_init(spline, K, P, Nk); free(K); free(P); /* Allocating the density grids */ delta = (float *)fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); check_memory(delta, "delta") deltak_tmp = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak_tmp, "deltak_tmp") deltak = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak, "deltak") /* Alloc the needed quantities for the random generator */ gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc(gsl_rng_taus); gsl_rng_set(rng_ptr, seed); /* Constructing the Fourier space density grid */ #pragma omp parallel for private(i, j, k, kx, ky, kz, kmod, std) for (i = 0; i < nx; i++) { if (2 * i < nx) kx = (float)i *klx; else kx = (float)(i - nx) * klx; for (j = 0; j < ny; j++) { if (2 * j < ny) ky = (float)j *kly; else ky = (float)(j - ny) * kly; for (k = 0; k < nz2; k++) { kz = (float)k *klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; kmod = sqrt(kx * kx + ky * ky + kz * kz); if (kmod == 0.0) kmod = pow(klx * kly * klz, 1.0 / 3.0) / 4.0; std = sqrt(gsl_spline_eval(spline, kmod, acc) / 2.0); /* 
Generate Gaussian random number with std */ deltak[ind][0] = (float)gsl_ran_gaussian(rng_ptr, std); deltak[ind][1] = (float)gsl_ran_gaussian(rng_ptr, std); deltak_tmp[ind][0] = deltak[ind][0]; deltak_tmp[ind][1] = deltak[ind][1]; if (isnan(deltak_tmp[ind][0])) printf("Problem with deltak_tmp[%ld][0]\n", ind); if (isnan(deltak_tmp[ind][1])) printf("Problem with deltak_tmp[%ld][1]\n", ind); } } } gsl_spline_free(spline); gsl_interp_accel_free(acc); gsl_rng_free(rng_ptr); /* Execute the FFTW3 to compute the density grid in real space */ p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak_tmp, delta, FFTW_ESTIMATE); fftwf_execute(p1); fftwf_free(deltak_tmp); /* Save the density grid */ if (out_inter == 1 || out_inter == 3) { printf("Saving the density grid\n"); den_grid = fopen(denfile, "wb"); if (den_grid == NULL) { printf("Unable to open %s\n", denfile); exit(0); } fwrite(&nx, sizeof(int), 1, den_grid); fwrite(&ny, sizeof(int), 1, den_grid); fwrite(&nz, sizeof(int), 1, den_grid); fwrite(&Lc, sizeof(float), 1, den_grid); for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; fwrite(&delta[ind], sizeof(float), 1, den_grid); } fclose(den_grid); } } /* Compute the mean and std of the linear density field */ kx = 0.0; ky = 0.0; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; delta[ind] = delta[ind] * Normx; kx += delta[ind] * delta[ind]; ky += delta[ind]; } kx = kx / ((float)nx * (float)ny * (float)nz); ky = ky / ((float)nx * (float)ny * (float)nz); printf("Mean = %f and Sigma = %f\n", ky, sqrt(kx - ky * ky)); /*************************/ /* Finding the halos */ /*************************/ if (out_halos != 0) { printf("\nFinding the spherical halos!\n"); /* Alloc the flag array */ flag = (int *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(int)); check_memory(flag, "flag") /* Initialize the flag 
array */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; flag[ind] = -1; } /* Counting the number of peaks */ np = 0; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; den = delta[ind]; if (den > delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -1, nz)]) np++; } /* Alloc the array with the peaks and final halos */ peaks = (PEAKS *) malloc(np * sizeof(PEAKS)); halos = (HALOS *) malloc(np * sizeof(HALOS)); cont = 0; /* Save the position and density of each peak */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; den = delta[ind]; if (den > delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] && den > delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -1, nz)]) { peaks[cont].x[0] = i; peaks[cont].x[1] = j; peaks[cont].x[2] = k; peaks[cont].den = den; cont++; } } /* Check the new number of peaks and elements in the peaks array */ if (cont != np) { printf("The number of peaks does not match. 
%ld != %ld!\n", np, cont); exit(0); } /* Sort the peaks */ quickSort_peaks(peaks, 0, np - 1); /* Grow the spherical halos around the density peaks */ nh = 0; printf("We have %ld peaks\n", np); for (l = 0; l < np; l++) { /* If this peak is already in a halo jump to teh next one */ if (flag[(size_t) (peaks[l].x[0] * ny + peaks[l].x[1]) * (size_t) nz + (size_t) peaks[l].x[2]] != -1) continue; /* * Check if this peak is near to the slice used to construct the * light cone */ if (out_halos == 2 || out_halos == 3) { m = 1; for (i = -Nrep_x; i <= Nrep_x; i++) for (j = -Nrep_y; j <= Nrep_y; j++) for (k = -Nrep_z; k <= Nrep_z; k++) { /* Compute the distance for this replic */ Pos[0] = (peaks[l].x[0] + 0.5) * Lc + Lx * i - Pobs[0]; Pos[1] = (peaks[l].x[1] + 0.5) * Lc + Ly * j - Pobs[1]; Pos[2] = (peaks[l].x[2] + 0.5) * Lc + Lz * k - Pobs[2]; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (dist <= dist_min - Rmax || dist > dist_max + Rmax) m = 0; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (theta_min + Rmax / dist < M_PI && cost < cos(theta_min + Rmax / dist)) m = 0; } if (m == 0) continue; } den = peaks[l].den; den_tmp = peaks[l].den; cont = 0; cont_tmp = 1; grows_tmp = 0; /* Grows the shells up to the minimum of the barrier */ while (den_tmp >= Barrier(Sig_grid[Ncells - 1], dc, BARRIER, EB_a, EB_b, EB_alpha)) { if (cont < cont_tmp) grows = grows_tmp; grows_tmp++; den = den_tmp; cont = cont_tmp; den_tmp = den * (float)cont; tmp = floor(sqrt((double)grows_tmp)); if (tmp > nmin / 2) tmp = nmin / 2; for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) == grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); if (flag[ind] != -1) den_tmp += -Mtot; else den_tmp += delta[ind]; cont_tmp++; } den_tmp = den_tmp / 
(float)cont_tmp; } /* Decrease the shells up to the correct value of the barrier */ while (den < Barrier(Sig_grid[cont], dc, BARRIER, EB_a, EB_b, EB_alpha) && cont > 0) { den_tmp = den; cont_tmp = cont; den = den * (float)cont; tmp = floor(sqrt((double)grows)); if (tmp > nmin / 2) tmp = nmin / 2; for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) == grows) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); den -= delta[ind]; cont--; } if (cont > 0) den = den / (float)cont; if (cont < cont_tmp) grows_tmp = grows; grows--; } if (cont == 0) continue; /* Put the correct flags to the cells */ tmp = floor(sqrt((double)grows_tmp)); for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) < grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); if (flag[ind] != -1) printf("(1): This flag != -1! 
Flag = %d and the new one is %ld\n", flag[ind], nh); flag[ind] = nh; } /* Save the halo information */ if (cont >= Nmin) { halos[nh].cont = cont; for (i = 0; i < 3; i++) halos[nh].x[i] = peaks[l].x[i]; nh++; } else { for (i = -tmp; i <= tmp; i++) for (j = -tmp; j <= tmp; j++) for (k = -tmp; k <= tmp; k++) if (dist2(i, j, k) < grows_tmp) { size_t ind = (size_t) (cysum(peaks[l].x[0], i, nx) * ny + cysum(peaks[l].x[1], j, ny)) * (size_t) nz + (size_t) cysum(peaks[l].x[2], k, nz); flag[ind] = -2; } } } free(peaks); free(Sig_grid); /* * Find the possible number of particles in a halo sphere = (int * *)malloc(Ncells*sizeof(int)); m = 0; for(l=0;l<10000;l++){ * if(l%100 == 0) printf("l = %ld\n", l); * * tmp = floor(sqrt((float) l)); cont = 0; * * for(i=-tmp;i<=tmp;i++) for(j=-tmp;j<=tmp;j++) for(k=-tmp;k<=tmp;k++) * if(dist2(i, j, k) == l) cont ++; * * if(cont > 0){ if(m > 0) sphere[m] = sphere[m-1] + cont; else * phere[m] = cont; * * m ++; } } * /*Save this information den_grid = fopen("Spheres.dat", "wb"); if * (den_grid == NULL) { printf("Unable to open spheres.dat\n"); * exit(0); } * * fwrite(&m, sizeof(int), 1, den_grid); for(i=0;i<m;i++) * fwrite(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid); */ /* Read the data with the number of cells in each sphere */ den_grid = fopen("Spheres.dat", "rb"); if (den_grid == NULL) { printf("Unable to open spheres.dat\n"); exit(0); } fread(&m, sizeof(int), 1, den_grid); sphere = (int *)malloc(m * sizeof(int)); for (i = 0; i < m; i++) fread(&sphere[i], sizeof(int), 1, den_grid); fclose(den_grid); printf("We have %ld halos\n", nh); } /********************************/ /* Displacing the particles */ /********************************/ printf("\nDisplacing the particles using 1LPT!\n"); /* * Define the arrays to store the final position, velocity and mass of * each halo */ float **velh, **posh, *Massh; if (out_halos != 0) { gsl_rng *rng_ptr; rng_ptr = gsl_rng_alloc(gsl_rng_taus); gsl_rng_set(rng_ptr, seed); Massh = (float 
*)malloc(nh * sizeof(float)); velh = (float **)malloc(nh * sizeof(float *)); posh = (float **)malloc(nh * sizeof(float *)); for (i = 0; i < nh; i++) { velh[i] = (float *)malloc(3 * sizeof(float)); posh[i] = (float *)malloc(3 * sizeof(float)); for (j = 0; j < 3; j++) { posh[i][j] = 0.0; velh[i][j] = 0.0; } cont = Next_Count(sphere, Ncells, halos[i].cont); den_tmp = gsl_spline_eval(spline_I, halos[i].cont * Mcell, acc_I) + (gsl_spline_eval(spline_I, sphere[cont] * Mcell, acc_I) - gsl_spline_eval(spline_I, halos[i].cont * Mcell, acc_I)) * gsl_rng_uniform(rng_ptr); Massh[i] = gsl_spline_eval(spline_InvI, den_tmp, acc_InvI); } free(sphere); } /* Read the displacement field */ if (DISP_CAT == 0) { /* Open the output file for the displacement field */ printf("Reading the displacement field\n"); disp_cat = fopen(dispfile, "rb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fread(&nx, sizeof(int), 1, disp_cat); fread(&ny, sizeof(int), 1, disp_cat); fread(&nz, sizeof(int), 1, disp_cat); fread(&Lc, sizeof(float), 1, disp_cat); /* Read the displacement and add to each halo */ for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; if (DO_2LPT == 0) { fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += Growth * kx; posh[tmp][1] += Growth * ky; posh[tmp][2] += Growth * kz; velh[tmp][0] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += Growth * pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } else { fread(&kx, sizeof(float), 1, disp_cat); fread(&ky, sizeof(float), 1, disp_cat); fread(&kz, sizeof(float), 1, disp_cat); fread(&factx, sizeof(float), 1, disp_cat); fread(&facty, sizeof(float), 1, disp_cat); fread(&factz, 
sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += Growth * (kx - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * factx); posh[tmp][1] += Growth * (ky - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * facty); posh[tmp][2] += Growth * (kz - Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * factz); velh[tmp][0] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * factx); velh[tmp][1] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * facty); velh[tmp][2] += Growth * (pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz); //-Growth * 3.0 / 7.0 * pow(Omz, -1.0 / 143) * 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * factz); } } } fclose(disp_cat); } /* Compute the displacement field */ if (DISP_CAT == 1) { /* Define the arrays with the displacement field used in 2LPT */ float *S1, *S2, *S3; if (DO_2LPT == 1) { S1 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); S2 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); S3 = (float *)malloc((size_t) nx * (size_t) ny * (size_t) nz * sizeof(float)); } /* Alloc deltak */ if (DEN_GRID == 0) { deltak = (fftwf_complex *) fftwf_malloc((size_t) nx * (size_t) ny * (size_t) nz2 * sizeof(fftwf_complex)); check_memory(deltak, "deltak") } /* Redefine the FFTW3 plan to compute the displacements */ fftwf_destroy_plan(p1); p1 = NULL; p1 = fftwf_plan_dft_c2r_3d(nx, ny, nz, deltak, delta, FFTW_ESTIMATE); /* Divide the fourier space density by the green's function */ #pragma omp parallel for private(i, j, k, kx, ky, kz, factx, facty, factz, fact) for (i = 0; i < nx; i++) { if (2 * i < nx) kx = i * klx; else kx = (i - nx) * klx; factx = 1.0 / 90.0 * (2.0 * cos(3.0 * kx * Lc) - 27.0 * cos(2.0 * kx * Lc) + 270.0 * cos(kx * Lc) - 
245.0) / (Lc * Lc); for (j = 0; j < ny; j++) { if (2 * j < ny) ky = j * kly; else ky = (j - ny) * kly; facty = 1.0 / 90.0 * (2.0 * cos(3.0 * ky * Lc) - 27.0 * cos(2.0 * ky * Lc) + 270.0 * cos(ky * Lc) - 245.0) / (Lc * Lc); for (k = 0; k < nz2; k++) { kz = k * klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; factz = 1.0 / 90.0 * (2.0 * cos(3.0 * kz * Lc) - 27.0 * cos(2.0 * kz * Lc) + 270.0 * cos(kz * Lc) - 245.0) / (Lc * Lc); size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; if (kx != 0.0 || ky != 0.0 || kz != 0.0) { fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0] / fact; deltak[ind][1] = deltak[ind][1] / fact; } else { deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /* Compute the potential at first order */ fftwf_execute(p1); /* * Compute the first order displacements and update the position and * velocity of each halo */ if (DO_2LPT == 1) { #pragma omp parallel for private(i, j, k, tmp) for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; S1[ind] = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); S2[ind] = -(1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * 
delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); S3[ind] = -(1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += S1[ind]; posh[tmp][1] += S2[ind]; posh[tmp][2] += S3[ind]; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S1[ind]; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S2[ind]; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * S3[ind]; } } } else { /* Open the output file for the displacement field */ if (out_inter == 2 || out_inter == 3) { printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } #pragma omp parallel for private(i, j, k, tmp, kx, ky, kz) for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; /* save the displacement field */ if (out_inter == 2 || out_inter == 3) { kx = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) 
* (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = -(1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = -(1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } /* Do not save the displacements */ else if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = -(1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * 
delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = -(1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = -(1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; velh[tmp][0] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kx; velh[tmp][1] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * ky; velh[tmp][2] += pow(Omz, 5.0 / 9.0) * Hz / (1.0 + redshift) * kz; } } if (out_inter == 2 || out_inter == 3) fclose(disp_cat); } if (DO_2LPT == 1) { printf("Displacing the particles using 2LPT!\n"); /* Evaluating the second order contribution */ p2 = fftwf_plan_dft_r2c_3d(nx, ny, nz, delta, deltak, FFTW_ESTIMATE); /* Compute the second order "density" */ #pragma omp parallel for private(i, j, k) for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { phixx = (1.0 * S1[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * S1[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * 
S1[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * S1[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * S1[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * S1[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phixy = (1.0 * S1[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * S1[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * S1[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * S1[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * S1[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * S1[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phixz = (1.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S1[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S1[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S1[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); phiyy = (1.0 * S2[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * S2[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * S2[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * S2[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * S2[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * S2[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) / (60.0 * Lc); phiyz = (1.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S2[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S2[(size_t) (i * ny + j) * (size_t) nz 
+ cysum(k, -1, nz)] + 9.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S2[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); phizz = (1.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * S3[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * S3[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * S3[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) / (60.0 * Lc); delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) k] = 1.0 * (phixx * phiyy + phixx * phizz + phiyy * phizz - pow(phixy, 2.0) - pow(phixz, 2.0) - pow(phiyz, 2.0)); } /* Go to fourier space to solve the posson equation */ fftwf_execute(p2); /* Divide the fourier space density by the green's function */ #pragma omp parallel for private(i, j, k, kx, ky, kz, fact, factx, facty, factz) for (i = 0; i < nx; i++) { if (2 * i < nx) kx = i * klx; else kx = (i - nx) * klx; factx = 1.0 / 90.0 * (2.0 * cos(3.0 * kx * Lc) - 27.0 * cos(2.0 * kx * Lc) + 270.0 * cos(kx * Lc) - 245.0) / (Lc * Lc); for (j = 0; j < ny; j++) { if (2 * j < ny) ky = j * kly; else ky = (j - ny) * kly; facty = 1.0 / 90.0 * (2.0 * cos(3.0 * ky * Lc) - 27.0 * cos(2.0 * ky * Lc) + 270.0 * cos(ky * Lc) - 245.0) / (Lc * Lc); for (k = 0; k < nz2; k++) { kz = k * klz; if (k == nz / 2) kz = -(float)nz / 2.0 * klz; factz = 1.0 / 90.0 * (2.0 * cos(3.0 * kz * Lc) - 27.0 * cos(2.0 * kz * Lc) + 270.0 * cos(kz * Lc) - 245.0) / (Lc * Lc); size_t ind = (size_t) (i * ny + j) * (size_t) nz2 + (size_t) k; if (kx != 0.0 || ky != 0.0 || kz != 0.0) { fact = factx + facty + factz; deltak[ind][0] = deltak[ind][0] / fact * Normk; deltak[ind][1] = deltak[ind][1] / fact * Normk; } else { deltak[ind][0] = 0.0; deltak[ind][1] = 0.0; } } } } /* Come back to real space */ fftwf_execute(p1); /* Open 
the output file for the displacement field */ if (out_inter == 2 || out_inter == 3) { printf("Saving the displaced particles\n"); disp_cat = fopen(dispfile, "wb"); if (disp_cat == NULL) { printf("Unable to open %s\n", dispfile); exit(0); } fwrite(&nx, sizeof(int), 1, disp_cat); fwrite(&ny, sizeof(int), 1, disp_cat); fwrite(&nz, sizeof(int), 1, disp_cat); fwrite(&Lc, sizeof(float), 1, disp_cat); } /* Compute the second order displacements and velocities */ #pragma omp parallel for private(i, j, k, kx, ky, kz, tmp) for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; /* save the displacement field */ if (out_inter == 2 || out_inter == 3) { kx = (1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = (1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny + cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = (1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + 
j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); fwrite(&S1[ind], sizeof(float), 1, disp_cat); fwrite(&S2[ind], sizeof(float), 1, disp_cat); fwrite(&S3[ind], sizeof(float), 1, disp_cat); fwrite(&kx, sizeof(float), 1, disp_cat); fwrite(&ky, sizeof(float), 1, disp_cat); fwrite(&kz, sizeof(float), 1, disp_cat); if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kx; ky = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * ky; kz = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kx; //velh[tmp][1] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * ky; //velh[tmp][2] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kz; } } /* Do not save the displacements */ else if (out_halos != 0) { tmp = flag[ind]; if (tmp < 0) continue; kx = (1.0 * delta[(size_t) (cysum(i, 3, nx) * ny + j) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (cysum(i, 2, nx) * ny + j) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (cysum(i, 1, nx) * ny + j) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (cysum(i, -1, nx) * ny + j) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (cysum(i, -2, nx) * ny + j) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (cysum(i, -3, nx) * ny + j) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); ky = (1.0 * delta[(size_t) (i * ny + cysum(j, 3, ny)) * (size_t) nz + (size_t) k] - 9.0 * delta[(size_t) (i * ny + cysum(j, 2, ny)) * (size_t) nz + (size_t) k] + 45.0 * delta[(size_t) (i * ny + cysum(j, 1, ny)) * (size_t) nz + (size_t) k] - 45.0 * delta[(size_t) (i * nx + cysum(j, -1, ny)) * (size_t) nz + (size_t) k] + 9.0 * delta[(size_t) (i * ny + cysum(j, -2, ny)) * (size_t) nz + (size_t) k] - 1.0 * delta[(size_t) (i * ny 
+ cysum(j, -3, ny)) * (size_t) nz + (size_t) k]) * Normx / (60.0 * Lc); kz = (1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 3, nz)] - 9.0 * delta[(size_t) (i * ny + j) * nz + (size_t) cysum(k, 2, nz)] + 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, 1, nz)] - 45.0 * delta[(size_t) (i * ny + j) * (size_t) nz + cysum(k, -1, nz)] + 9.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -2, nz)] - 1.0 * delta[(size_t) (i * ny + j) * (size_t) nz + (size_t) cysum(k, -3, nz)]) * Normx / (60.0 * Lc); kx = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kx; ky = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * ky; kz = -3.0 / 7.0 * pow(Omz, -1.0 / 143) * kz; posh[tmp][0] += kx; posh[tmp][1] += ky; posh[tmp][2] += kz; //velh[tmp][0] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kx; //velh[tmp][1] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * ky; //velh[tmp][2] += 2.0 * pow(Omz, 6.0 / 11.0) * Hz / (1.0 + redshift) * kz; } } if (out_inter == 2 || out_inter == 3) fclose(disp_cat); /* Free the FFTW memory */ fftwf_destroy_plan(p2); free(S1); free(S2); free(S3); } fftwf_destroy_plan(p1); fftwf_free(deltak); } fftwf_free(delta); if (out_collapse == 0 && out_halos != 0) free(flag); /* Compute the final position and velocity of the halos */ if (out_halos != 0) { for (i = 0; i < nh; i++) { posh[i][0] = cysumf(halos[i].x[0] * Lc + Lc / 2.0, posh[i][0] / halos[i].cont, Lx); posh[i][1] = cysumf(halos[i].x[1] * Lc + Lc / 2.0, posh[i][1] / halos[i].cont, Ly); posh[i][2] = cysumf(halos[i].x[2] * Lc + Lc / 2.0, posh[i][2] / halos[i].cont, Lz); velh[i][0] = velh[i][0] / halos[i].cont; velh[i][1] = velh[i][1] / halos[i].cont; velh[i][2] = velh[i][2] / halos[i].cont; } } /* Saving the positions and velocities in real space */ if (out_halos == 1) { printf("Saving the halos\n"); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { printf("Unable to open %s\n", halofile); exit(0); } fprintf(halo_cat, "%ld\n", nh); for (i = 0; i < nh; i++) { 
fprintf(halo_cat, "%f %f %f %f %f %f %e %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], Massh[i], halos[i].cont); } fclose(halo_cat); } /* Putting galaxies in the halos */ if (DO_HOD == 1) { printf("Saving the galaxies\n"); sprintf(halofile, "%s_gals.dat", argv[14]); halo_cat = fopen(halofile, "w"); if (halo_cat == NULL) { printf("Unable to open %s\n", halofile); exit(0); } cont = 0; for (i = 0; i < nh; i++) { /* Compute the number of central and satellite galaxies */ if (Ncentral(Massh[i], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double)Nsatellite(Massh[i], logM0, logM1, alpha)); Ngals = Ncen + Nsat; if (Ngals == 0) continue; /* Save the central galaxy */ if (Ncen == 1) { fprintf(halo_cat, "%f %f %f %f %f %f %d\n", posh[i][0], posh[i][1], posh[i][2], velh[i][0], velh[i][1], velh[i][2], i); cont++; } /* Put the satellite galaxies following the NFW profile */ if (Nsat > 0) { Rv = pow(3.0 * Massh[i] / (4.0 * M_PI * Dv * rhom), 1.0 / 3.0); C = f_c(Massh[i], (float)Mstar, z); A = log(1.0 + C) - C / (1.0 + C); } for (j = 0; j < Nsat; j++) { phi = 2.0 * M_PI * gsl_rng_uniform(rng_ptr); theta = M_PI * gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(posh[i][0], r * sin(theta) * cos(phi), Lx); ky = cysumf(posh[i][1], r * sin(theta) * sin(phi), Ly); kz = cysumf(posh[i][2], r * cos(theta), Lz); fprintf(halo_cat, "%f %f %f ", kx, ky, kz); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fprintf(halo_cat, "%f %f %f %d\n", kx, ky, kz, i); cont++; } } fclose(halo_cat); n_bar = cont / (Lx * Ly * Lz); printf("n_bar = %f\n", n_bar); } /********************************/ /* Put the halos in the lightcone */ /********************************/ if (out_halos == 2 || out_halos == 3) { printf("\nPutting the halos in the light cone!\n"); printf("The code is using (%d, %d, %d) replicas to construct the light cone.\n", Nrep_x, Nrep_y, Nrep_z); printf("This snapshot is in 
the range %f - %f [Mpc/h] with theta_min = %f.\n", dist_min, dist_max, theta_min); /* Open the light cone file */ light_cat = fopen(lightfile, "wb"); if (light_cat == NULL) { printf("Unable to open %s\n", lightfile); exit(0); } cont = 0; fwrite(&cont, sizeof(long), 1, light_cat); /* Run over all the halos and save then in the light cone file */ for (l = 0; l < nh; l++) { for (i = -Nrep_x; i <= Nrep_x; i++) for (j = -Nrep_y; j <= Nrep_y; j++) for (k = -Nrep_z; k <= Nrep_z; k++) { /* Compute the distance for this replic */ Pos[0] = posh[l][0] + Lx * i - Pobs[0]; Pos[1] = posh[l][1] + Ly * j - Pobs[1]; Pos[2] = posh[l][2] + Lz * k - Pobs[2]; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (out_halos == 3) { /* Compute the distance in redshift space */ vr = 0.0; for (m = 0; m < 3; m++) vr += velh[l][m] * Pos[m]; vr = vr / dist; for (m = 0; m < 3; m++) Pos[m] = Pos[m] + vr / Hz * (1.0 + redshift) * Pos[m] / dist; dist = dist + vr / Hz * (1.0 + redshift); } if (dist <= dist_min || dist > dist_max) continue; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (cost < cos_min) continue; /* Save the information about this halo */ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); fwrite(&Massh[l], sizeof(float), 1, light_cat); cont++; /* Put galaxies in this halo (one type) */ if (DO_HOD == 1) { /* * Compute the number of central and satellite * galaxies */ if (Ncentral(Massh[l], logMmin, siglogM) >= gsl_rng_uniform(rng_ptr)) Ncen = 1; else Ncen = 0; Nsat = gsl_ran_poisson(rng_ptr, (double)Nsatellite(Massh[l], logM0, logM1, alpha)); Ngals = Ncen + Nsat; /* Save the total number of galaxies */ fwrite(&Ngals, sizeof(int), 1, light_cat); /* Save the 
central galaxy */ fwrite(&Pos[0], sizeof(float), 1, light_cat); fwrite(&Pos[1], sizeof(float), 1, light_cat); fwrite(&Pos[2], sizeof(float), 1, light_cat); fwrite(&velh[l][0], sizeof(float), 1, light_cat); fwrite(&velh[l][1], sizeof(float), 1, light_cat); fwrite(&velh[l][2], sizeof(float), 1, light_cat); /* * Put the satellite galaxies following the NFW * profile */ if (Nsat > 0) { Rv = pow(3.0 * Massh[l] / (4.0 * M_PI * Dv * rhom), 1.0 / 3.0); C = f_c(Massh[l], (float)Mstar, z); A = log(1.0 + C) - C / (1.0 + C); } for (m = 0; m < Nsat; m++) { phi = 2.0 * M_PI * gsl_rng_uniform(rng_ptr); theta = M_PI * gsl_rng_uniform(rng_ptr); r = Generate_NFW(Rv, C, A, seed); kx = cysumf(Pos[0], r * sin(theta) * cos(phi), Lx); ky = cysumf(Pos[1], r * sin(theta) * sin(phi), Ly); kz = cysumf(Pos[2], r * cos(theta), Lz); fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); kx = velh[i][0]; ky = velh[i][1]; kz = velh[i][2]; fwrite(&kx, sizeof(float), 1, light_cat); fwrite(&ky, sizeof(float), 1, light_cat); fwrite(&kz, sizeof(float), 1, light_cat); } } } } rewind(light_cat); fwrite(&cont, sizeof(long), 1, light_cat); fclose(light_cat); if (out_collapse == 1) { /* * Open the file to save the information about the collapsed * particles */ collapse_cat = fopen(collapsefile, "wb"); if (collapse_cat == NULL) { printf("Unable to open %s\n", collapsefile); exit(0); } /* Save the information about the colapsed particles */ int a, b, c; cont = 0; fwrite(&cont, sizeof(long), 1, collapse_cat); #pragma omp parallel for private(i, j, k, a, b, c, Pos, dist, cost, tmp) for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) for (k = 0; k < nz; k++) { size_t ind = (size_t) (i * ny + j) * (size_t) nz + (size_t) k; for (a = -Nrep_x; a <= Nrep_x; a++) for (b = -Nrep_y; b <= Nrep_y; b++) for (c = -Nrep_z; c <= Nrep_z; c++) { /* Compute the distance for this replic */ Pos[0] = i * Lc + Lc / 2.0 + Lx * a; Pos[1] = j * Lc + Lc / 2.0 + Ly * 
b; Pos[2] = k * Lc + Lc / 2.0 + Lz * c; dist = 0.0; for (m = 0; m < 3; m++) dist += Pos[m] * Pos[m]; dist = sqrt(dist); if (dist <= dist_min || dist > dist_max) continue; /* Compute the angle theta */ cost = 0.0; for (m = 0; m < 3; m++) cost += Pos[m] * LoS[m]; cost = cost / dist; if (cost < cos_min) continue; tmp = flag[ind]; cont++; fwrite(&ind, sizeof(size_t), 1, collapse_cat); fwrite(&tmp, sizeof(int), 1, collapse_cat); fwrite(&redshift, sizeof(float), 1, collapse_cat); } } rewind(collapse_cat); fwrite(&cont, sizeof(long), 1, collapse_cat); fclose(collapse_cat); free(flag); } } /*******************/ /* Free the memory */ /*******************/ gsl_spline_free(spline_I); gsl_spline_free(spline_InvI); gsl_interp_accel_free(acc_I); gsl_interp_accel_free(acc_InvI); if (out_halos != 0) { free(Massh); for (i = 0; i < nh; i++) { free(velh[i]); free(posh[i]); } /* Free the rest */ free(velh); free(posh); free(halos); gsl_rng_free(rng_ptr); } return 0; }
GB_binop__minus_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_uint64)
// A*D function (colscale):         GB (_AxD__minus_uint64)
// D*A function (rowscale):         GB (_DxB__minus_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_uint64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_uint64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_uint64)
// C=scalar+B                       GB (_bind1st__minus_uint64)
// C=scalar+B'                     GB (_bind1st_tran__minus_uint64)
// C=A+scalar                       GB (_bind2nd__minus_uint64)
// C=A'+scalar                     GB (_bind2nd_tran__minus_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   uint64_t
// B pattern? 0

// BinaryOp: cij = (aij - bij)

// The GB_* macros below are the "operator plug-ins" consumed by the template
// files #included in the kernels further down; the templates never mention
// uint64_t or MINUS directly — they only use these macros.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// NOTE(review): GBX is presumably the iso-aware value accessor from GB.h
// (returns Ax [0] when A_iso is true) — confirm against GB.h.
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing backslash intentionally splices the following BLANK line;
// do not place a comment immediately after it)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (same trailing-backslash pattern as GB_A_IS_PATTERN above)
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// Cx [p], the p-th entry of the C value array
#define GB_CX(p) Cx [p]

// binary operator: z = x - y (unsigned wrap-around on underflow is
// well-defined for uint64_t)
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint64) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint64) // A*D function (colscale): GB (_AxD__minus_uint64) // D*A function (rowscale): GB (_DxB__minus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint64) // C=scalar+B GB (_bind1st__minus_uint64) // C=scalar+B' GB (_bind1st_tran__minus_uint64) // C=A+scalar GB (_bind2nd__minus_uint64) // C=A'+scalar GB (_bind2nd_tran__minus_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint64) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) 
continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint64) // A*D function (colscale): GB (_AxD__minus_uint64) // D*A function (rowscale): GB (_DxB__minus_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint64) // C=scalar+B GB (_bind1st__minus_uint64) // C=scalar+B' GB (_bind1st_tran__minus_uint64) // C=A+scalar GB (_bind2nd__minus_uint64) // C=A'+scalar GB (_bind2nd_tran__minus_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
// NOTE(review): this span begins mid-function — the first lines below are the
// tail of the preceding column-scale kernel, whose header is not visible in
// this chunk (presumably GB (_AxD__minus_uint64); confirm against the full file).
(GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row scale: each row i of B is combined with the diagonal entry D(i,i) using
// the MINUS operator on uint64_t values; the actual loop lives in the shared
// row-scale template, driven by the operator macros defined earlier in the file.
GrB_Info GB (_DxB__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Element-wise add (set union of patterns) with the MINUS uint64 operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries that
// are present in only one of A or B; otherwise the template uses the entry
// values directly.  Work is sliced per task via the TaskList.
GrB_Info GB (_AaddB__minus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in)) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Element-wise multiply (set intersection of patterns), general sparse case.
GrB_Info GB (_AemultB_08__minus_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x - Bx [p] for every entry present in B (Bb is B's bitmap, or NULL
// when B is full); skipped entries are left untouched.
GrB_Info GB (_bind1st__minus_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t  x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] - y for every entry present in A (Ab is A's bitmap).
GrB_Info GB (_bind2nd__minus_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t  y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}

#endif
convolution_sgemm_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Quantize a float to int8 with round-to-nearest, saturating to the
// symmetric range [-127, 127] (-128 deliberately excluded).
static inline signed char float2int8(float v)
{
    int int32 = round(v);
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

// int8 convolution via im2col + packed 4x4 int GEMM.  Writes raw int32
// accumulators into top_blob (no dequantization).  top_blob's dimensions
// (outw/outh/outch) must already be set by the caller.
static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
            const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    // im2row: one row per output pixel, laid out [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);

                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4
    // Groups of 4 output pixels are interleaved 2 k-steps at a time; the
    // per-4 groups live in channels 0..out_size/4-1 and each leftover pixel
    // gets its own trailing channel (the channel(i/4 + i%4) idiom below).
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            // pack two consecutive k values per pixel: [p0k0 p0k1 p1k0 p1k1 ...]
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4 (same interleave as bottom_tm, but over
    // groups of 4 output channels)
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                        // outch
        // int N = outw * outh;                  // outsize or out stride
        // int L = kernel_w * kernel_h * inch;   // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main tile: 4 output channels x 4 output pixels, k unrolled by 2
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i+1);
            int* output2 = top_blob.channel(i+2);
            int* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }

                    va += 8;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }

                    va += 4;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }

                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // leftover output pixels for this 4-channel group
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // leftover output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            int* output = top_blob.channel(i);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n];
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = sum;

                output++;
            }
        }
    }

    // reference implementation kept for readability
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}

// Same im2col + packed 4x4 int GEMM as above, but dequantizes the int32
// accumulator to float: out = sum * scale_dequant[channel] + bias[channel].
static void conv_im2col_sgemm_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
            const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);

                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                        // outch
        // int N = outw * outh;                  // outsize or out stride
        // int L = kernel_w * kernel_h * inch;   // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;

            const float scale_dequant0 = scale_dequant[i];
            const float scale_dequant1 = scale_dequant[i+1];
            const float scale_dequant2 = scale_dequant[i+2];
            const float scale_dequant3 = scale_dequant[i+3];

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }

                    va += 8;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }

                    va += 4;
                    vb += 4;
                }

                // dequantize: float out = int32 sum * per-channel scale + bias
                for (int n=0; n<4; n++)
                {
                    output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
                    output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
                    output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
                    output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
                }

                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = (float)sum0 * scale_dequant0 + bias0;
                output1[0] = (float)sum1 * scale_dequant1 + bias1;
                output2[0] = (float)sum2 * scale_dequant2 + bias2;
                output3[0] = (float)sum3 * scale_dequant3 + bias3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_dequant0 = scale_dequant[i];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = (float)sum[n] * scale_dequant0 + bias0;
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = (float)sum * scale_dequant0 + bias0;

                output++;
            }
        }
    }

    // reference implementation kept for readability
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}

// Same GEMM, but requantizes back to int8:
// out = float2int8((sum * scale_in + bias) * scale_out),
// with per-channel (scale_in, scale_out) pairs stored interleaved in
// scale_requant: [2*c] = scale_in, [2*c+1] = scale_out.
static void conv_im2col_sgemm_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
            const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat &_bias, std::vector<float> scale_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    // im2row
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i=0; i<outh; i++)
        {
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);

                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;

                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            int q=0;
            for (; q+1<inch*kernel_size; q+=2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }

            for (; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                        // outch
        // int N = outw * outh;                  // outsize or out stride
        // int L = kernel_w * kernel_h * inch;   // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            signed char* output0 = top_blob.channel(i);
            signed char* output1 = top_blob.channel(i+1);
            signed char* output2 = top_blob.channel(i+2);
            signed char* output3 = top_blob.channel(i+3);

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i+1] : 0.f;
            const float bias2 = bias ? bias[i+2] : 0.f;
            const float bias3 = bias ? bias[i+3] : 0.f;

            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];
            const float scale_requant_in1 = scale_requant[2*(i+1)];
            const float scale_requant_out1 = scale_requant[2*(i+1)+1];
            const float scale_requant_in2 = scale_requant[2*(i+2)];
            const float scale_requant_out2 = scale_requant[2*(i+2)+1];
            const float scale_requant_in3 = scale_requant[2*(i+3)];
            const float scale_requant_out3 = scale_requant[2*(i+3)+1];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }

                    va += 8;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }

                    va += 4;
                    vb += 4;
                }

                // requantize: dequantize, add bias, rescale, saturate to int8
                for (int n=0; n<4; n++)
                {
                    output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                    output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
                    output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
                    output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
                }

                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
                output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
                output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
                output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            signed char* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_requant_in0 = scale_requant[2*i];
            const float scale_requant_out0 = scale_requant[2*i+1];

            int j=0;
            for (; j+3<N; j=j+4)
            {
                signed char* vb = bottom_tm.channel(j/4);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                }
                output += 4;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/4 + j%4);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);

                output++;
            }
        }
    }

    // reference implementation kept for readability
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// BUG1989 is pleased to support the open source community by supporting ncnn available. // //Copyright(C) 2019 BUG1989.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static inline signed char float2int8(float v) { int int32 = round(v); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } static void conv_im2col_sgemm_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size 
/ 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) 
{ ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; int *output0 = top_blob.channel(i); int *output1 = top_blob.channel(i + 1); int *output2 = top_blob.channel(i + 2); int *output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * 
vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } for (int i = remain_outch_start; i < outch; i++) { int *output = top_blob.channel(i); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n]; } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum; output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char 
*)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } } static void conv_im2col_sgemm_int8_dequant_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat & _bias, std: :vector < float >scale_dequant, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q 
+ 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * 
kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; const float bias0 = bias ? bias[i] : 0. f; const float bias1 = bias ? bias[i + 1] : 0. f; const float bias2 = bias ? bias[i + 2] : 0. f; const float bias3 = bias ? bias[i + 3] : 0. f; const float scale_dequant0 = scale_dequant[i]; const float scale_dequant1 = scale_dequant[i + 1]; const float scale_dequant2 = scale_dequant[i + 2]; const float scale_dequant3 = scale_dequant[i + 3]; float *output0 = top_blob.channel(i); float *output1 = top_blob.channel(i + 1); float *output2 = top_blob.channel(i + 2); float *output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = (float)sum0[n] * scale_dequant0 + bias0; output1[n] = (float)sum1[n] * 
scale_dequant1 + bias1; output2[n] = (float)sum2[n] * scale_dequant2 + bias2; output3[n] = (float)sum3[n] * scale_dequant3 + bias3; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = (float)sum0 *scale_dequant0 + bias0; output1[0] = (float)sum1 *scale_dequant1 + bias1; output2[0] = (float)sum2 *scale_dequant2 + bias2; output3[0] = (float)sum3 *scale_dequant3 + bias3; output0++; output1++; output2++; output3++; } } for (int i = remain_outch_start; i < outch; i++) { float *output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0. 
f; const float scale_dequant0 = scale_dequant[i]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = (float)sum[n] * scale_dequant0 + bias0; } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = (float)sum *scale_dequant0 + bias0; output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char *)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } } static void conv_im2col_sgemm_int8_requant_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat & _bias, std: :vector < float >scale_requant, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { 
const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, 
opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; signed char *output0 = top_blob.channel(i); signed char *output1 = top_blob.channel(i + 1); signed char *output2 = top_blob.channel(i + 2); signed char *output3 = top_blob.channel(i + 3); const float bias0 = bias ? bias[i] : 0. f; const float bias1 = bias ? bias[i + 1] : 0. f; const float bias2 = bias ? bias[i + 2] : 0. f; const float bias3 = bias ? bias[i + 3] : 0. 
f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; const float scale_requant_in1 = scale_requant[2 * (i + 1)]; const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1]; const float scale_requant_in2 = scale_requant[2 * (i + 2)]; const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1]; const float scale_requant_in3 = scale_requant[2 * (i + 3)]; const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0); output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1); output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2); output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3); } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * 
vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); output0++; output1++; output2++; output3++; } } for (int i = remain_outch_start; i < outch; i++) { signed char *output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0. f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0); } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0); output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < 
M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char *)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } }
// BUG1989 is pleased to support the open source community by supporting ncnn available. // //Copyright(C) 2019 BUG1989.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. static inline signed char float2int8(float v) { int int32 = round(v); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } static void conv_im2col_sgemm_int8_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size 
/ 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; 
const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; int *output0 = top_blob.channel(i); int *output1 = top_blob.channel(i + 1); int *output2 = top_blob.channel(i + 2); int *output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 
1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { int *output = top_blob.channel(i); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n]; } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 
0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum; output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char *)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } } static void conv_im2col_sgemm_int8_dequant_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat & _bias, std: :vector < float >scale_dequant, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii 
* 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; 
ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; const float bias0 = bias ? bias[i] : 0. f; const float bias1 = bias ? bias[i + 1] : 0. f; const float bias2 = bias ? bias[i + 2] : 0. f; const float bias3 = bias ? bias[i + 3] : 0. 
f; const float scale_dequant0 = scale_dequant[i]; const float scale_dequant1 = scale_dequant[i + 1]; const float scale_dequant2 = scale_dequant[i + 2]; const float scale_dequant3 = scale_dequant[i + 3]; float *output0 = top_blob.channel(i); float *output1 = top_blob.channel(i + 1); float *output2 = top_blob.channel(i + 2); float *output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = (float)sum0[n] * scale_dequant0 + bias0; output1[n] = (float)sum1[n] * scale_dequant1 + bias1; output2[n] = (float)sum2[n] * scale_dequant2 + bias2; output3[n] = (float)sum3[n] * scale_dequant3 + bias3; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 
+= (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = (float)sum0 *scale_dequant0 + bias0; output1[0] = (float)sum1 *scale_dequant1 + bias1; output2[0] = (float)sum2 *scale_dequant2 + bias2; output3[0] = (float)sum3 *scale_dequant3 + bias3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { float *output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0. f; const float scale_dequant0 = scale_dequant[i]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = (float)sum[n] * scale_dequant0 + bias0; } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = (float)sum *scale_dequant0 + bias0; output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char *)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } } static void conv_im2col_sgemm_int8_requant_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, 
const Mat & _bias, std: :vector < float >scale_requant, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; //im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char *ret = (signed char *)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char *input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; //int M = outch; //outch int N = outw * outh; //outsize or out stride int K = kernel_w * kernel_h * inch; //ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_im2row.row < signed char >(i); const signed char *img1 = bottom_im2row.row < signed char >(i + 1); const signed char *img2 = bottom_im2row.row < signed char >(i + 2); const signed char *img3 = bottom_im2row.row < signed char >(i + 3); signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; 
tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char *img0 = bottom_im2row.row < signed char >(i); signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } //kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t) 1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch * kernel_size; const signed char *k1 = kernel + (p + 1) * inch * kernel_size; const signed char *k2 = kernel + (p + 2) * inch * kernel_size; const signed char *k3 = kernel + (p + 3) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch * kernel_size; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } //4 x4 // sgemm(int 
M, int N, int K, float *A, float *B, float *C) { //int M = outch; //outch // int N = outw * outh; //outsize or out stride // int L = kernel_w * kernel_h * inch; //ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; signed char *output0 = top_blob.channel(i); signed char *output1 = top_blob.channel(i + 1); signed char *output2 = top_blob.channel(i + 2); signed char *output3 = top_blob.channel(i + 3); const float bias0 = bias ? bias[i] : 0. f; const float bias1 = bias ? bias[i + 1] : 0. f; const float bias2 = bias ? bias[i + 2] : 0. f; const float bias3 = bias ? bias[i + 3] : 0. f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; const float scale_requant_in1 = scale_requant[2 * (i + 1)]; const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1]; const float scale_requant_in2 = scale_requant[2 * (i + 2)]; const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1]; const float scale_requant_in3 = scale_requant[2 * (i + 3)]; const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; //k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; //k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; //k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; //k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += 
(int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0); output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1); output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2); output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3); } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { signed char *output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0. 
f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char *vb = bottom_tm.channel(j / 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0); } output += 4; } for (; j < N; j++) { int sum = 0; signed char *vb = bottom_tm.channel(j / 4 + j % 4); signed char *va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0); output++; } } } ////sgemm(int M, int N, int K, float *A, float *B, float *C) // { //for (int i = 0; i < M; i++) // { //int *output = top_blob.channel(i); //for (int j = 0; j < N; j++) // { //int sum = 0; //signed char *vb = (signed char *)bottom_im2row + K * j; //const signed char *va = kernel + K * i; //for (int k = 0; k < K; k++) // { //sum += (int)va[0] * vb[0]; //va += 1; //vb += 1; // } //output[0] = sum; //output++; // } // } // } }
GB_unop__identity_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, 
typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ 
LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
// Row-major flattening of a coordinate vector into a linear index.
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) zeroes the term on axes where the coordinate is out
    // of range -- presumably to support broadcast axes of extent 1; confirm
    // against callers.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
// Walks axes innermost-to-outermost, peeling one coordinate per step.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i] without a second division
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
// Turns a coordinate into a strided linear offset.
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
// Equivalent to dot(unravel(idx, shape), stride) without materializing the coordinate.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    // Axes of extent <= 1 get stride 0 so they broadcast when indexed.
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates */
// Odometer-style increment; returns false once the coordinate wraps past `shape`.
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index */
// Keeps `idx` (a strided linear offset) in sync with the incremented coordinate.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // Undo the full sweep of axis i and advance axis i-1.
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Same as above but maintains two strided offsets in lockstep.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: direct element copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Different dtype: cast element-wise while copying.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
            mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
\brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // Chain rule: incoming gradient times the op's local gradient.
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  // Exposed so Kernel<...>::Launch can recover the wrapped primitive op for tuning.
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  /*! \brief input is a tensor and the output is a boolean tensor */
  // enable_if excludes DType == bool so these do not collide with the generic overloads.
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and one scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*!
\brief inputs are two tensors with a float output tensor */
  // Restricted to integral DType via SFINAE.
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; only the cpu and gpu specializations below are defined.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth forking threads: run serially.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    // NOTE(review): the `false` argument's meaning is defined by engine::OpenMP --
    // confirm there before relying on it.
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // schedule(dynamic) balances uneven per-iteration cost.
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Tuning data decides whether OMP pays off for this op/size combination.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
* \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Serial path: the op processes the whole [0, N) range in one call.
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into one contiguous chunk per thread (ceiling division).
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        // The last chunk may be shorter than `length`.
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  // Selected (via enable_if) when OP itself derives from tunable.
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  // Selected when the wrapper's nested Operation (the primitive op) is tunable.
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // Tune on the wrapped primitive op, not on the req-wrapper itself.
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride loop over [0, N): each CUDA thread handles indices
// i, i + blockDim.x * gridDim.x, ...
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
      i < N;
      i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Same as above, but calls the (index, length) form of Map with length 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
      i < N;
      i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;  // empty launch is a no-op
    using namespace mshadow::cuda;
    // Enough blocks to cover N, capped at the maximum grid size.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch GPU kernel via the (index, length) Map form */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }

  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;

}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_MXNET_OP_H_
/*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) 
\ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case 
mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) zeroes the contribution of axes where the
    // coordinate is out of range (used when broadcasting size-1 axes).
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape (inverse of ravel) */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // remainder = coordinate along axis i
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector (coordinate . stride = linear offset) */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: strided offset for flat index idx without
 * materializing the intermediate coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    // Size-1 axes get stride 0 so reads repeat along them (broadcasting).
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates (row-major order); returns false once past the end. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    // Carry into the next-more-significant axis.
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index (kept consistent with one stride) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // Undo the full sweep along axis i, advance one step on axis i-1.
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index (two indices / two stride sets) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another; casts element-wise when
 *        the two blobs have different dtypes.
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: raw memory copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Different dtype: expression-template cast copy.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
 \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // Chain rule: incoming gradient times the local gradient of GRAD_OP.
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  // The overloads below are disabled for DType == bool via SFINAE to avoid
  // ambiguity with the generic (DType*) overloads above.

  /*! \brief input is a tensor and the output is a boolean tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*!
 \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; specialized per device (cpu / gpu) below.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  // NOTE(review): in this variant the body is empty — it returns true without
  // ever calling OP::Map; confirm this stub build is intentional.
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  // NOTE(review): stub body, same as Launch above.
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  // NOTE(review): empty stub body in this build variant — confirm intentional.
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  // NOTE(review): empty stub body in this build variant — confirm intentional.
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // Tune on the wrapped primitive operation rather than the wrapper itself.
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Generic elementwise GPU kernel: every thread strides over the index range.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Variant whose OP::Map takes (index, length, ...) — launched with length 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    // Grid size capped at kMaxGridNum; each block runs kBaseThreadNum threads.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch GPU kernel whose OP::Map takes an explicit length argument */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
/*!
 * Copyright (c) 2017 by Contributors
 * \file mxnet_op.h
 * \brief Elementwise kernel launch helpers and dtype-dispatch macros.
 * \author Junyuan Xie
 */
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_

#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif  // __CUDACC__

namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;

#ifdef __CUDA_ARCH__
// Device-side constant copy of pi for CUDA compilation.
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

// Recommended number of worker threads for the given device; specialized below.
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
// Grid-stride loop header used by hand-written CUDA kernels.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
      i < (n); \
      i += blockDim.x * gridDim.x)

inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}

template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

// CPU: defer to the engine's OpenMP policy (N is not consulted here).
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}

/*! \brief operator request type switch (kNullOp executes nothing) */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

/*! \brief operator request type switch (kNullOp also runs the body) */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    { \
      const OpReqType ReqType = kNullOp; \
      {__VA_ARGS__} \
    } \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

/* Bind a runtime ndim (1..5) to a compile-time constant `ndim`. */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

/* Extended variant of MXNET_NDIM_SWITCH supporting ndim up to 10. */
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else if (NDim == 6) { \
    const int ndim = 6; \
    {__VA_ARGS__} \
  } else if (NDim == 7) { \
    const int ndim = 7; \
    {__VA_ARGS__} \
  } else if (NDim == 8) { \
    const int ndim = 8; \
    {__VA_ARGS__} \
  } else if (NDim == 9) { \
    const int ndim = 9; \
    {__VA_ARGS__} \
  } else if (NDim == 10) { \
    const int ndim = 10; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

/* Dtype dispatch that rejects int8/uint8 inputs at runtime. */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/* Dtype dispatch that rejects float16 inputs at runtime. */
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    LOG(FATAL) << "This operation does not " \
                  "support float16"; \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

// Maps a dtype to its accumulation type (half accumulates in float).
template <typename T>
struct AccType {
  using type = T;
};

template <>
struct AccType<mshadow::half::half_t> {
  using type = float;
};

/* Floating-point-only dispatch; also binds an accumulation type AType. */
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not uint8"; \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int8_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types not int8"; \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int32_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int32"; \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not int64"; \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      LOG(FATAL) << "This operation only support " \
                    "floating point types, not bool"; \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/* Dispatch over every dtype, binding a wider accumulation type AType. */
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      typedef uint32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      typedef int32_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kBool: \
    { \
      typedef bool DType; \
      typedef int64_t AType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/* Integer-only dispatch; floating-point inputs fail at runtime. */
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float32"; \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float64"; \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      LOG(FATAL) << "This operation only support " \
                    "integer types, not float16"; \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/* Dispatch restricted to the dtypes supported by the load path. */
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Invalid loading enum type " << type; \
  }

/*!
 * \brief Assign `val` to `out` according to the OpReqType `req`
 *        inside a Kernel<...>::Launch body.
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
// kNullOp: skip the write entirely; kWriteTo / kWriteInplace: plain overwrite;
// kAddTo: accumulate into the existing output value.
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }

// Registers every numeric dtype enum value on a dmlc parameter field.
#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

// Same as MXNET_ADD_ALL_TYPES but additionally accepts the boolean dtype.
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64) \
  .add_enum("bool", mshadow::kBool)

/* \brief Compute flattened index given coordinates and shape.
*/
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) zeroes the contribution of axes where the
    // coordinate is out of range (used when broadcasting size-1 axes).
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape (inverse of ravel) */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // remainder = coordinate along axis i
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector (coordinate . stride = linear offset) */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: strided offset for flat index idx without
 * materializing the intermediate coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >= 0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    // Size-1 axes get stride 0 so reads repeat along them (broadcasting).
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates (row-major order); returns false once past the end. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    // Carry into the next-more-significant axis.
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index (kept consistent with one stride) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // Undo the full sweep along axis i, advance one step on axis i-1.
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index (two indices / two stride sets) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another; casts element-wise when
 *        the two blobs have different dtypes.
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: raw memory copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Different dtype: expression-template cast copy.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
 \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // Chain rule: incoming gradient times the local gradient of GRAD_OP.
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  // The overloads below are disabled for DType == bool via SFINAE to avoid
  // ambiguity with the generic (DType*) overloads above.

  /*! \brief input is a tensor and the output is a boolean tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*!
 \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; specialized per device (cpu / gpu) below.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth parallelizing: run serially.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // Dynamic schedule lets threads steal work for irregular per-iteration cost.
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Consult the tuning data: only go parallel when it predicts a win for
    // this primitive op, dtype and problem size.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
* \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
parallel_for_wrapper.h
#pragma once #include <omp.h> #include <thread> #include <opencv2/opencv.hpp> /* On OpenCV 3.2.0, cv::parallel_for_ combined with lambda function is not supported (occurring below error). If version of the library is greater than 3.2.0. "parallel_for_omp" can be replaced by cv::parallel_for_ without loss of lambda function calling. /usr/local/include/opencv2/core/utility.hpp:478:75: note: in passing argument 2 of ‘void cv::parallel_for_(const cv::Range&, const cv::ParallelLoopBody&, double)’ 478 | CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); */ template <class BODY> void parallel_for_omp(const cv::Range& range, BODY body) { #pragma omp parallel for for (int i = range.start; i < range.end; ++i) body(cv::Range(i, i + 1)); }
#pragma once #include <omp.h> #include <thread> #include <opencv2/opencv.hpp> /* On OpenCV 3.2.0, cv::parallel_for_ combined with lambda function is not supported (occurring below error). If version of the library is greater than 3.2.0. "parallel_for_omp" can be replaced by cv::parallel_for_ without loss of lambda function calling. /usr/local/include/opencv2/core/utility.hpp:478:75: note: in passing argument 2 of ‘void cv::parallel_for_(const cv::Range&, const cv::ParallelLoopBody&, double)’ 478 | CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); */ template <class BODY> void parallel_for_omp(const cv::Range& range, BODY body) { for (int i = range.start; i < range.end; ++i) body(cv::Range(i, i + 1)); }
#pragma once #include <omp.h> #include <thread> #include <opencv2/opencv.hpp> /* On OpenCV 3.2.0, cv::parallel_for_ combined with lambda function is not supported (occurring below error). If version of the library is greater than 3.2.0. "parallel_for_omp" can be replaced by cv::parallel_for_ without loss of lambda function calling. /usr/local/include/opencv2/core/utility.hpp:478:75: note: in passing argument 2 of ‘void cv::parallel_for_(const cv::Range&, const cv::ParallelLoopBody&, double)’ 478 | CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); */ template <class BODY> void parallel_for_omp(const cv::Range& range, BODY body) { #pragma omp parallel for for (int i = range.start; i < range.end; ++i) body(cv::Range(i, i + 1)); }
drift.c
/* * clockperf * * Copyright (c) 2016-2021, Steven Noonan <steven@uplinklabs.net> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ #include "prefix.h" #include "affinity.h" #include "clock.h" #include "drift.h" #ifdef HAVE_DRIFT_TESTS #include <assert.h> #include <stdbool.h> #include <omp.h> struct global_cfg { struct clockspec clk; struct clockspec ref; }; typedef enum { UNSTARTED = 0, // not yet spawned WAITING = 1, // waiting for requests from master REPORTING = 2, // thread asked to report in EXITING = 3, // thread asked to exit DEAD = 4, // thread exited } thread_state; struct thread_ctx { thread_state state; uint64_t last_clk; uint64_t last_ref; char padding[104]; // padding to at least one L2 cache line wide }; static inline int driftsleep(int usec) { #ifdef TARGET_OS_WINDOWS usec /= 1000; if (usec < 1) usec = 1; Sleep(usec); return 0; #else return usleep(usec); #endif } static uint32_t thread_count; void drift_init(void) { #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); } } } void drift_run(uint32_t runtime_ms, struct clockspec clkid, struct clockspec refid) { uint32_t idx; struct thread_ctx *threads = NULL; struct global_cfg cfg; memset(&cfg, 0, sizeof(struct global_cfg)); cfg.clk = clkid; cfg.ref = refid; threads = (struct thread_ctx *)calloc(thread_count, 
sizeof(struct thread_ctx)); /* Spawn drift thread per CPU */ #pragma omp parallel { #pragma omp master { struct thread_ctx *thread, *this = NULL; uint64_t start_ref, start_clk; int64_t delta_clk, expect_ms_ref; uint32_t unstarted; do { unstarted = 0; for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state == UNSTARTED) { unstarted++; this = thread; } } } while (unstarted != 1); thread_bind(omp_get_thread_num()); //uint64_t curr_clk; //int64_t delta_ref, expect_ms_clk; clock_read(cfg.clk, &start_clk); clock_read(cfg.ref, &start_ref); do { for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state > UNSTARTED) thread->state = REPORTING; } clock_read(cfg.clk, &this->last_clk); clock_read(cfg.ref, &this->last_ref); for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; while (thread->state == REPORTING) driftsleep(10); } expect_ms_ref = (this->last_ref / 1000000ULL) - (start_ref / 1000000ULL); //expect_ms_clk = (this->last_clk / 1000000ULL) - (start_clk / 1000000ULL); printf("%9" PRId64 ": ", expect_ms_ref); for (idx = 0; idx < thread_count; idx++) { //int64_t ref_ms; int64_t clk_ms; thread = &threads[idx]; //ref_ms = (thread->last_ref / 1000000ULL) - (start_ref / 1000000ULL); clk_ms = (thread->last_clk / 1000000ULL) - (start_clk / 1000000ULL); //delta_ref = (ref_ms - expect_ms_ref); delta_clk = (clk_ms - expect_ms_ref); printf("%6" PRId64 ", ", delta_clk); if ((idx + 1) % 8 == 0 && idx < thread_count - 1) printf("\n%11s", ""); } printf("\n"); driftsleep(1000000); } while(expect_ms_ref < runtime_ms); for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; thread->state = EXITING; } } #pragma omp for for(uint32_t i = 0; i < thread_count; i++) { uint32_t thread_id = omp_get_thread_num(); struct thread_ctx *ctx = &threads[thread_id]; struct clockspec clk_id = cfg.clk; struct clockspec ref_id = cfg.ref; thread_bind(thread_id); //printf("starting thread %d : %d\n", thread_id, i); if (ctx->state != 
UNSTARTED) continue; do { uint64_t clk; uint64_t ref; while (ctx->state == WAITING) { //printf("thread %d:%d waiting\n", thread_id, i); driftsleep(100); } if (ctx->state == EXITING) break; clock_read(clk_id, &clk); clock_read(ref_id, &ref); ctx->last_clk = clk; ctx->last_ref = ref; ctx->state = WAITING; } while(1); ctx->state = DEAD; } } free(threads); } #endif /* vim: set ts=4 sts=4 sw=4 et: */
#include "prefix.h" #include "affinity.h" #include "clock.h" #include "drift.h" #ifdef HAVE_DRIFT_TESTS #include <assert.h> #include <stdbool.h> #include <omp.h> struct global_cfg { struct clockspec clk; struct clockspec ref; }; typedef enum { UNSTARTED = 0, //not yet spawned WAITING = 1, //waiting for requests from master REPORTING = 2, //thread asked to report in EXITING = 3, //thread asked to exit DEAD = 4, //thread exited } thread_state; struct thread_ctx { thread_state state; uint64_t last_clk; uint64_t last_ref; char padding[104]; //padding to at least one L2 cache line wide }; static inline int driftsleep(int usec) { #ifdef TARGET_OS_WINDOWS usec /= 1000; if (usec < 1) usec = 1; Sleep(usec); return 0; #else return usleep(usec); #endif } static uint32_t thread_count; void drift_init(void) { #pragma omp master { thread_count = omp_get_num_threads(); } } void drift_run(uint32_t runtime_ms, struct clockspec clkid, struct clockspec refid) { uint32_t idx; struct thread_ctx *threads = NULL; struct global_cfg cfg; memset(&cfg, 0, sizeof(struct global_cfg)); cfg.clk = clkid; cfg.ref = refid; threads = (struct thread_ctx *)calloc(thread_count, sizeof(struct thread_ctx)); /* Spawn drift thread per CPU */ #pragma omp master { struct thread_ctx *thread, *this = NULL; uint64_t start_ref, start_clk; int64_t delta_clk, expect_ms_ref; uint32_t unstarted; do { unstarted = 0; for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state == UNSTARTED) { unstarted++; this = thread; } } } while (unstarted != 1); thread_bind(omp_get_thread_num()); //uint64_t curr_clk; //int64_t delta_ref, expect_ms_clk; clock_read(cfg.clk, &start_clk); clock_read(cfg.ref, &start_ref); do { for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state > UNSTARTED) thread->state = REPORTING; } clock_read(cfg.clk, &this->last_clk); clock_read(cfg.ref, &this->last_ref); for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; while (thread->state 
== REPORTING) driftsleep(10); } expect_ms_ref = (this->last_ref / 1000000ULL) - (start_ref / 1000000ULL); //expect_ms_clk = (this->last_clk / 1000000ULL) - (start_clk / 1000000ULL); printf("%9" PRId64 ": ", expect_ms_ref); for (idx = 0; idx < thread_count; idx++) { //int64_t ref_ms; int64_t clk_ms; thread = &threads[idx]; //ref_ms = (thread->last_ref / 1000000ULL) - (start_ref / 1000000ULL); clk_ms = (thread->last_clk / 1000000ULL) - (start_clk / 1000000ULL); //delta_ref = (ref_ms - expect_ms_ref); delta_clk = (clk_ms - expect_ms_ref); printf("%6" PRId64 ", ", delta_clk); if ((idx + 1) % 8 == 0 && idx < thread_count - 1) printf("\n%11s", ""); } printf("\n"); driftsleep(1000000); } while (expect_ms_ref < runtime_ms); for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; thread->state = EXITING; } } for (uint32_t i = 0; i < thread_count; i++) { uint32_t thread_id = omp_get_thread_num(); struct thread_ctx *ctx = &threads[thread_id]; struct clockspec clk_id = cfg.clk; struct clockspec ref_id = cfg.ref; thread_bind(thread_id); //printf("starting thread %d : %d\n", thread_id, i); if (ctx->state != UNSTARTED) continue; do { uint64_t clk; uint64_t ref; while (ctx->state == WAITING) { //printf("thread %d:%d waiting\n", thread_id, i); driftsleep(100); } if (ctx->state == EXITING) break; clock_read(clk_id, &clk); clock_read(ref_id, &ref); ctx->last_clk = clk; ctx->last_ref = ref; ctx->state = WAITING; } while (1); ctx->state = DEAD; } free(threads); } #endif /* vim: set ts=4 sts=4 sw=4 et: */
#include "prefix.h" #include "affinity.h" #include "clock.h" #include "drift.h" #ifdef HAVE_DRIFT_TESTS #include <assert.h> #include <stdbool.h> #include <omp.h> struct global_cfg { struct clockspec clk; struct clockspec ref; }; typedef enum { UNSTARTED = 0, //not yet spawned WAITING = 1, //waiting for requests from master REPORTING = 2, //thread asked to report in EXITING = 3, //thread asked to exit DEAD = 4, //thread exited } thread_state; struct thread_ctx { thread_state state; uint64_t last_clk; uint64_t last_ref; char padding[104]; //padding to at least one L2 cache line wide }; static inline int driftsleep(int usec) { #ifdef TARGET_OS_WINDOWS usec /= 1000; if (usec < 1) usec = 1; Sleep(usec); return 0; #else return usleep(usec); #endif } static uint32_t thread_count; void drift_init(void) { #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); } } } void drift_run(uint32_t runtime_ms, struct clockspec clkid, struct clockspec refid) { uint32_t idx; struct thread_ctx *threads = NULL; struct global_cfg cfg; memset(&cfg, 0, sizeof(struct global_cfg)); cfg.clk = clkid; cfg.ref = refid; threads = (struct thread_ctx *)calloc(thread_count, sizeof(struct thread_ctx)); /* Spawn drift thread per CPU */ #pragma omp parallel { #pragma omp master { struct thread_ctx *thread, *this = NULL; uint64_t start_ref, start_clk; int64_t delta_clk, expect_ms_ref; uint32_t unstarted; do { unstarted = 0; for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state == UNSTARTED) { unstarted++; this = thread; } } } while (unstarted != 1); thread_bind(omp_get_thread_num()); //uint64_t curr_clk; //int64_t delta_ref, expect_ms_clk; clock_read(cfg.clk, &start_clk); clock_read(cfg.ref, &start_ref); do { for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; if (thread->state > UNSTARTED) thread->state = REPORTING; } clock_read(cfg.clk, &this->last_clk); clock_read(cfg.ref, &this->last_ref); for (idx = 0; idx < thread_count; 
idx++) { thread = &threads[idx]; while (thread->state == REPORTING) driftsleep(10); } expect_ms_ref = (this->last_ref / 1000000ULL) - (start_ref / 1000000ULL); //expect_ms_clk = (this->last_clk / 1000000ULL) - (start_clk / 1000000ULL); printf("%9" PRId64 ": ", expect_ms_ref); for (idx = 0; idx < thread_count; idx++) { //int64_t ref_ms; int64_t clk_ms; thread = &threads[idx]; //ref_ms = (thread->last_ref / 1000000ULL) - (start_ref / 1000000ULL); clk_ms = (thread->last_clk / 1000000ULL) - (start_clk / 1000000ULL); //delta_ref = (ref_ms - expect_ms_ref); delta_clk = (clk_ms - expect_ms_ref); printf("%6" PRId64 ", ", delta_clk); if ((idx + 1) % 8 == 0 && idx < thread_count - 1) printf("\n%11s", ""); } printf("\n"); driftsleep(1000000); } while (expect_ms_ref < runtime_ms); for (idx = 0; idx < thread_count; idx++) { thread = &threads[idx]; thread->state = EXITING; } } #pragma omp for for (uint32_t i = 0; i < thread_count; i++) { uint32_t thread_id = omp_get_thread_num(); struct thread_ctx *ctx = &threads[thread_id]; struct clockspec clk_id = cfg.clk; struct clockspec ref_id = cfg.ref; thread_bind(thread_id); //printf("starting thread %d : %d\n", thread_id, i); if (ctx->state != UNSTARTED) continue; do { uint64_t clk; uint64_t ref; while (ctx->state == WAITING) { //printf("thread %d:%d waiting\n", thread_id, i); driftsleep(100); } if (ctx->state == EXITING) break; clock_read(clk_id, &clk); clock_read(ref_id, &ref); ctx->last_clk = clk; ctx->last_ref = ref; ctx->state = WAITING; } while (1); ctx->state = DEAD; } } free(threads); } #endif /* vim: set ts=4 sts=4 sw=4 et: */
memdbg.h
/* ****** NOTE ****** * This header file should be the LAST header file included within every * .c file within the project. If there are .h files that have actual * code in them, then this header should be the last include within that * .h file, and that .h file should be the last one included within the * .c file. * ****** NOTE ***** */ #if !defined (__MEM_DBG_H_) #define __MEM_DBG_H_ // values to use within the MemDbg_Validate() function. #define MEMDBG_VALIDATE_MIN 0 #define MEMDBG_VALIDATE_DEEP 1 #define MEMDBG_VALIDATE_DEEPER 2 #define MEMDBG_VALIDATE_DEEPEST 3 #include <stdio.h> #include <stdlib.h> #include "os.h" #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #include <string.h> #include "memory.h" #if defined (MEMDBG_ON) /* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ /* * memdbg.h * Memory management debugging (at runtime) * * memdbg contains routines detect, and report memory * problems, such as double frees, passing bad pointers to * free, most buffer overwrites. Also, tracking of non-freed * data, showing memory leaks, can also be shown. * * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. */ /* these functions can be called by client code. Normally Memdbg_Used() and * MemDbg_Display() would be called at program exit. That will dump a list * of any memory that was not released. 
The MemDbg_Validate() can be called * pretty much any time. That function will walk the memory allocation linked * lists, and sqwack if there are problems, such as overwrites, freed memory that * has been written to, etc. It would likely be good to call MemDbg_Validate() * within benchmarking, after every format is tested. * * TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display() * and a function to get the 'current' state of memory as a handle. Thus, a * format self test could get a handle BEFORE starting, and then check after, and * ONLY show leaked memory from the time the handle was obtained, which was at the * start of the self test. Thus it would only show leaks from that format test. * * These functions are NOT thread safe. Do not call them within OMP blocks of code. * Normally, these would be called at program exit, or within things like format * self test code, etc, and not within OMP. But this warning is here, so that * it is known NOT to call within OMP. */ extern size_t MemDbg_Used(int show_freed); extern void MemDbg_Display(FILE *); extern void MemDbg_Validate(int level); extern void MemDbg_Validate_msg(int level, const char *pMsg); extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData); /* these functions should almost NEVER be called by any client code. They * are listed here, because the macros need to know their names. Client code * should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc() * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this * function would not be declared here, AND at link time, the function would * not be found. * NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic) * also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is * very slow, and the thread safety required makes it even slow. This is not only talking * about these functions here, BUT malloc/free in general in OMP blocks. 
AVOID doing that * at almost all costs, and performance will usually go up. */ extern void *MEMDBG_alloc(size_t, char *, int); extern void *MEMDBG_alloc_align(size_t, int, char *, int); extern void *MEMDBG_calloc(size_t count, size_t, char *, int); extern void *MEMDBG_realloc(void *, size_t, char *, int); extern void MEMDBG_free(const void *, char *, int); extern char *MEMDBG_strdup(const char *, char *, int); #if !defined(__MEMDBG__) /* we get here on every file compiled EXCEPT memdbg.c */ #undef malloc #undef realloc #undef free #undef strdup #undef libc_free #undef libc_calloc #undef libc_malloc #define libc_free(a) MEMDBG_libc_free(a) #define libc_malloc(a) MEMDBG_libc_alloc(a) #define libc_calloc(a,b) MEMDBG_libc_calloc(a,b) #define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__) #define calloc(a,b) MEMDBG_calloc(a,b,__FILE__,__LINE__) #define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__) #define free(a) MEMDBG_free((a),__FILE__,__LINE__) #define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__) #endif /* !defined __MEMDBG__ */ /* pass the file handle to write to (normally stderr) */ #define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \ if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \ MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0) typedef struct MEMDBG_HANDLE_t { unsigned id; unsigned alloc_cnt; size_t mem_size; } MEMDBG_HANDLE; /* * these functions give a caller some of the INSIDE information about the * allocated object. We simply return data from inside the memdbg header. * NOTE, if fence post is not valid, we still return something, BUT will * also return something in the err_msg stating this may not be valid. */ /* The count 'id' of an allocated block. 
Same as used in leak report */ unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg); /* the size allocated of the contained block */ size_t MEMDBG_get_size(const void *ptr, const char **err_msg); /* what file (source) did the allocation */ const char *MEMDBG_get_file(const void *ptr, const char **err_msg); /* what file (source) line number did the allocation */ unsigned MEMDBG_get_line(const void *ptr, const char **err_msg); /* * these functions allow taking a memory snapshot, calling some code, then validating that memory * is the same after the code. This will help catch memory leaks and other such problems, within * formats and such. Simply get the snapshot, run self tests (or other), when it exits, check * the snapshot to make sure nothing leaked. */ /* returning a struct (or passing as params it not super efficient but this is done so infrequently that this is not an issue. */ MEMDBG_HANDLE MEMDBG_getSnapshot(int id); /* will not exit on leaks. Does exit, on memory overwrite corruption. */ void MEMDBG_checkSnapshot(MEMDBG_HANDLE); /* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */ void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks); /* * the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate * so it is not flagged as a leak, by these HANDLE snapshot functions. 'tiny' memory is expected * to leak, until program exit. At that time, any that was not freed, will be shown as leaked. * THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe. 
*/ void MEMDBG_tag_mem_from_alloc_tiny(void *); extern void MEMDBG_libc_free(void *); extern void *MEMDBG_libc_alloc(size_t size); extern void *MEMDBG_libc_calloc(size_t count, size_t size); #else #define libc_alloc alloc #define libc_calloc calloc #define libc_malloc malloc #define libc_free free #define MemDbg_Used(a) 0 #define MemDbg_Display(a) #define MemDbg_Validate(a) #define MemDbg_Validate_msg(a,b) #define MemDbg_Validate_msg2(a,b,c) #define MEMDBG_PROGRAM_EXIT_CHECKS(a) #define MEMDBG_tag_mem_from_alloc_tiny(a) #define MEMDBG_HANDLE int #define MEMDBG_getSnapshot(a) 0 #define MEMDBG_checkSnapshot(a) if(a) printf(" \b") #define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b") #endif /* MEMDBG_ON */ #endif /* __MEMDBG_H_ */
/* * memdbg.h * Memory management debugging (at runtime) * * memdbg contains routines detect, and report memory * problems, such as double frees, passing bad pointers to * free, most buffer overwrites. Also, tracking of non-freed * data, showing memory leaks, can also be shown. * * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. */ /* these functions can be called by client code. Normally Memdbg_Used() and * MemDbg_Display() would be called at program exit. That will dump a list * of any memory that was not released. The MemDbg_Validate() can be called * pretty much any time. That function will walk the memory allocation linked * lists, and sqwack if there are problems, such as overwrites, freed memory that * has been written to, etc. It would likely be good to call MemDbg_Validate() * within benchmarking, after every format is tested. * * TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display() * and a function to get the 'current' state of memory as a handle. Thus, a * format self test could get a handle BEFORE starting, and then check after, and * ONLY show leaked memory from the time the handle was obtained, which was at the * start of the self test. Thus it would only show leaks from that format test. * * These functions are NOT thread safe. Do not call them within OMP blocks of code. * Normally, these would be called at program exit, or within things like format * self test code, etc, and not within OMP. But this warning is here, so that * it is known NOT to call within OMP. 
*/ extern size_t MemDbg_Used(int show_freed); extern void MemDbg_Display(FILE *); extern void MemDbg_Validate(int level); extern void MemDbg_Validate_msg(int level, const char *pMsg); extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData); /* these functions should almost NEVER be called by any client code. They * are listed here, because the macros need to know their names. Client code * should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc() * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this * function would not be declared here, AND at link time, the function would * not be found. * NOTE, these functions should be thread safe in OMP builds (using * also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is * very slow, and the thread safety required makes it even slow. This is not only talking * about these functions here, BUT malloc/free in general in OMP blocks. AVOID doing that * at almost all costs, and performance will usually go up. 
*/ extern void *MEMDBG_alloc(size_t, char *, int); extern void *MEMDBG_alloc_align(size_t, int, char *, int); extern void *MEMDBG_calloc(size_t count, size_t, char *, int); extern void *MEMDBG_realloc(void *, size_t, char *, int); extern void MEMDBG_free(const void *, char *, int); extern char *MEMDBG_strdup(const char *, char *, int); #if !defined(__MEMDBG__) /* we get here on every file compiled EXCEPT memdbg.c */ #undef malloc #undef realloc #undef free #undef strdup #undef libc_free #undef libc_calloc #undef libc_malloc #define libc_free(a) MEMDBG_libc_free(a) #define libc_malloc(a) MEMDBG_libc_alloc(a) #define libc_calloc(a,b) MEMDBG_libc_calloc(a,b) #define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__) #define calloc(a,b) MEMDBG_calloc(a,b,__FILE__,__LINE__) #define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__) #define free(a) MEMDBG_free((a),__FILE__,__LINE__) #define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__) #endif /* !defined __MEMDBG__ */ /* pass the file handle to write to (normally stderr) */ #define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \ if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \ MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0) typedef struct MEMDBG_HANDLE_t { unsigned id; unsigned alloc_cnt; size_t mem_size; } MEMDBG_HANDLE; /* * these functions give a caller some of the INSIDE information about the * allocated object. We simply return data from inside the memdbg header. * NOTE, if fence post is not valid, we still return something, BUT will * also return something in the err_msg stating this may not be valid. */ /* The count 'id' of an allocated block. 
Same as used in leak report */ unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg); /* the size allocated of the contained block */ size_t MEMDBG_get_size(const void *ptr, const char **err_msg); /* what file (source) did the allocation */ const char *MEMDBG_get_file(const void *ptr, const char **err_msg); /* what file (source) line number did the allocation */ unsigned MEMDBG_get_line(const void *ptr, const char **err_msg); /* * these functions allow taking a memory snapshot, calling some code, then validating that memory * is the same after the code. This will help catch memory leaks and other such problems, within * formats and such. Simply get the snapshot, run self tests (or other), when it exits, check * the snapshot to make sure nothing leaked. */ /* returning a struct (or passing as params it not super efficient but this is done so infrequently that this is not an issue. */ MEMDBG_HANDLE MEMDBG_getSnapshot(int id); /* will not exit on leaks. Does exit, on memory overwrite corruption. */ void MEMDBG_checkSnapshot(MEMDBG_HANDLE); /* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */ void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks); /* * the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate * so it is not flagged as a leak, by these HANDLE snapshot functions. 'tiny' memory is expected * to leak, until program exit. At that time, any that was not freed, will be shown as leaked. * THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe. 
*/ void MEMDBG_tag_mem_from_alloc_tiny(void *); extern void MEMDBG_libc_free(void *); extern void *MEMDBG_libc_alloc(size_t size); extern void *MEMDBG_libc_calloc(size_t count, size_t size); #else #define libc_alloc alloc #define libc_calloc calloc #define libc_malloc malloc #define libc_free free #define MemDbg_Used(a) 0 #define MemDbg_Display(a) #define MemDbg_Validate(a) #define MemDbg_Validate_msg(a,b) #define MemDbg_Validate_msg2(a,b,c) #define MEMDBG_PROGRAM_EXIT_CHECKS(a) #define MEMDBG_tag_mem_from_alloc_tiny(a) #define MEMDBG_HANDLE int #define MEMDBG_getSnapshot(a) 0 #define MEMDBG_checkSnapshot(a) if(a) printf(" \b") #define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b") #endif /* MEMDBG_ON */ #endif /* __MEMDBG_H_ */
/* * memdbg.h * Memory management debugging (at runtime) * * memdbg contains routines detect, and report memory * problems, such as double frees, passing bad pointers to * free, most buffer overwrites. Also, tracking of non-freed * data, showing memory leaks, can also be shown. * * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. */ /* these functions can be called by client code. Normally Memdbg_Used() and * MemDbg_Display() would be called at program exit. That will dump a list * of any memory that was not released. The MemDbg_Validate() can be called * pretty much any time. That function will walk the memory allocation linked * lists, and sqwack if there are problems, such as overwrites, freed memory that * has been written to, etc. It would likely be good to call MemDbg_Validate() * within benchmarking, after every format is tested. * * TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display() * and a function to get the 'current' state of memory as a handle. Thus, a * format self test could get a handle BEFORE starting, and then check after, and * ONLY show leaked memory from the time the handle was obtained, which was at the * start of the self test. Thus it would only show leaks from that format test. * * These functions are NOT thread safe. Do not call them within OMP blocks of code. * Normally, these would be called at program exit, or within things like format * self test code, etc, and not within OMP. But this warning is here, so that * it is known NOT to call within OMP. 
*/ extern size_t MemDbg_Used(int show_freed); extern void MemDbg_Display(FILE *); extern void MemDbg_Validate(int level); extern void MemDbg_Validate_msg(int level, const char *pMsg); extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData); /* these functions should almost NEVER be called by any client code. They * are listed here, because the macros need to know their names. Client code * should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc() * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this * function would not be declared here, AND at link time, the function would * not be found. * NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic) * also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is * very slow, and the thread safety required makes it even slow. This is not only talking * about these functions here, BUT malloc/free in general in OMP blocks. AVOID doing that * at almost all costs, and performance will usually go up. 
*/ extern void *MEMDBG_alloc(size_t, char *, int); extern void *MEMDBG_alloc_align(size_t, int, char *, int); extern void *MEMDBG_calloc(size_t count, size_t, char *, int); extern void *MEMDBG_realloc(void *, size_t, char *, int); extern void MEMDBG_free(const void *, char *, int); extern char *MEMDBG_strdup(const char *, char *, int); #if !defined(__MEMDBG__) /* we get here on every file compiled EXCEPT memdbg.c */ #undef malloc #undef realloc #undef free #undef strdup #undef libc_free #undef libc_calloc #undef libc_malloc #define libc_free(a) MEMDBG_libc_free(a) #define libc_malloc(a) MEMDBG_libc_alloc(a) #define libc_calloc(a,b) MEMDBG_libc_calloc(a,b) #define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__) #define calloc(a,b) MEMDBG_calloc(a,b,__FILE__,__LINE__) #define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__) #define free(a) MEMDBG_free((a),__FILE__,__LINE__) #define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__) #endif /* !defined __MEMDBG__ */ /* pass the file handle to write to (normally stderr) */ #define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \ if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \ MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0) typedef struct MEMDBG_HANDLE_t { unsigned id; unsigned alloc_cnt; size_t mem_size; } MEMDBG_HANDLE; /* * these functions give a caller some of the INSIDE information about the * allocated object. We simply return data from inside the memdbg header. * NOTE, if fence post is not valid, we still return something, BUT will * also return something in the err_msg stating this may not be valid. */ /* The count 'id' of an allocated block. 
Same as used in leak report */ unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg); /* the size allocated of the contained block */ size_t MEMDBG_get_size(const void *ptr, const char **err_msg); /* what file (source) did the allocation */ const char *MEMDBG_get_file(const void *ptr, const char **err_msg); /* what file (source) line number did the allocation */ unsigned MEMDBG_get_line(const void *ptr, const char **err_msg); /* * these functions allow taking a memory snapshot, calling some code, then validating that memory * is the same after the code. This will help catch memory leaks and other such problems, within * formats and such. Simply get the snapshot, run self tests (or other), when it exits, check * the snapshot to make sure nothing leaked. */ /* returning a struct (or passing as params it not super efficient but this is done so infrequently that this is not an issue. */ MEMDBG_HANDLE MEMDBG_getSnapshot(int id); /* will not exit on leaks. Does exit, on memory overwrite corruption. */ void MEMDBG_checkSnapshot(MEMDBG_HANDLE); /* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */ void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks); /* * the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate * so it is not flagged as a leak, by these HANDLE snapshot functions. 'tiny' memory is expected * to leak, until program exit. At that time, any that was not freed, will be shown as leaked. * THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe. 
*/ void MEMDBG_tag_mem_from_alloc_tiny(void *); extern void MEMDBG_libc_free(void *); extern void *MEMDBG_libc_alloc(size_t size); extern void *MEMDBG_libc_calloc(size_t count, size_t size); #else #define libc_alloc alloc #define libc_calloc calloc #define libc_malloc malloc #define libc_free free #define MemDbg_Used(a) 0 #define MemDbg_Display(a) #define MemDbg_Validate(a) #define MemDbg_Validate_msg(a,b) #define MemDbg_Validate_msg2(a,b,c) #define MEMDBG_PROGRAM_EXIT_CHECKS(a) #define MEMDBG_tag_mem_from_alloc_tiny(a) #define MEMDBG_HANDLE int #define MEMDBG_getSnapshot(a) 0 #define MEMDBG_checkSnapshot(a) if(a) printf(" \b") #define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b") #endif /* MEMDBG_ON */ #endif /* __MEMDBG_H_ */
GB_unop__trunc_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__trunc_fc32_fc32) // op(A') function: GB (_unop_tran__trunc_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_ctruncf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_ctruncf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_ctruncf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__trunc_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp 
parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__trunc_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__trunc_fc32_fc32) // op(A') function: GB (_unop_tran__trunc_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_ctruncf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_ctruncf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_ctruncf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__trunc_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; 
p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__trunc_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__trunc_fc32_fc32) // op(A') function: GB (_unop_tran__trunc_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_ctruncf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_ctruncf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_ctruncf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__trunc_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp 
parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__trunc_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sgemm.c
#include <stdio.h> #include <stdlib.h> #include <inttypes.h> #include <math.h> #include <sys/time.h> #include <omp.h> /* * C = [n, q] = A[n, m] * B[m, q] */ enum { N = 1000, M = 1000, Q = 1000, NREPS = 5, }; double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } /* Matrix multiplication C[n, q] = A[n, m] * B[m, q] */ void sgemm_phi(float *a, float *b, float *c, int n, int m, int q) { #pragma offload target(mic) in(a:length(n * m)) in(b:length(m * q)) out(c:length(n * q)) { #pragma omp parallel { int k = 0; #pragma omp for for (int i = 0; i < n; i++) for (int j = 0; j < q; j++) c[k++] = 0.0; #pragma omp for for (int i = 0; i < n; i++) { for (int k = 0; k < m; k++) { for (int j = 0; j < q; j++) c[i * q + j] += a[i * m + k] * b[k * q + j]; } } } } } double run_phi(const char *msg, void (*sgemm_fun)(float *, float *, float *, int, int, int)) { double gflop = 2.0 * N * Q * M * 1E-9; float *a, *b, *c; a = malloc(sizeof(*a) * N * M); b = malloc(sizeof(*b) * M * Q); c = malloc(sizeof(*c) * N * Q); if (a == NULL || b == NULL || c == NULL) { fprintf(stderr, "No enough memory\n"); exit(EXIT_FAILURE); } srand(0); for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) a[i * M + j] = rand() % 100; // 1.0; } for (int i = 0; i < M; i++) { for (int j = 0; j < Q; j++) b[i * Q + j] = rand() % 100; // 2.0; } /* Warmup */ double twarmup = wtime(); sgemm_fun(a, b, c, N, M, Q); twarmup = wtime() - twarmup; /* Measures */ double tavg = 0.0; double tmin = 1E6; double tmax = 0.0; for (int i = 0; i < NREPS; i++) { double t = wtime(); sgemm_fun(a, b, c, N, M, Q); t = wtime() - t; tavg += t; tmin = (tmin > t) ? t : tmin; tmax = (tmax < t) ? 
t : tmax; } tavg /= NREPS; printf("%s (%d runs): perf %.2f GFLOPS; time: tavg %.6f, tmin %.6f, tmax %.6f, twarmup %.6f\n", msg, NREPS, gflop / tavg, tavg, tmin, tmax, twarmup); free(c); free(b); free(a); return tavg; } int main(int argc, char **argv) { printf("SGEMM N = %d, M = %d, Q = %d\n", N, M, Q); char buf[256]; sprintf(buf, "Phi OMP %s", getenv("MIC_OMP_NUM_THREADS")); run_phi(buf, &sgemm_phi); return 0; }
#include <stdio.h> #include <stdlib.h> #include <inttypes.h> #include <math.h> #include <sys/time.h> #include <omp.h> /* * C = [n, q] = A[n, m] * B[m, q] */ enum { N = 1000, M = 1000, Q = 1000, NREPS = 5, }; double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } /* Matrix multiplication C[n, q] = A[n, m] * B[m, q] */ void sgemm_phi(float *a, float *b, float *c, int n, int m, int q) { #pragma offload target(mic) in(a:length(n * m)) in(b:length(m * q)) out(c:length(n * q)) { int k = 0; for (int i = 0; i < n; i++) for (int j = 0; j < q; j++) c[k++] = 0.0; for (int i = 0; i < n; i++) { for (int k = 0; k < m; k++) { for (int j = 0; j < q; j++) c[i * q + j] += a[i * m + k] * b[k * q + j]; } } } } double run_phi(const char *msg, void (*sgemm_fun) (float *, float *, float *, int, int, int)) { double gflop = 2.0 * N * Q * M * 1E-9; float *a, *b, *c; a = malloc(sizeof(*a) * N * M); b = malloc(sizeof(*b) * M * Q); c = malloc(sizeof(*c) * N * Q); if (a == NULL || b == NULL || c == NULL) { fprintf(stderr, "No enough memory\n"); exit(EXIT_FAILURE); } srand(0); for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) a[i * M + j] = rand() % 100; //1.0; } for (int i = 0; i < M; i++) { for (int j = 0; j < Q; j++) b[i * Q + j] = rand() % 100; //2.0; } /* Warmup */ double twarmup = wtime(); sgemm_fun(a, b, c, N, M, Q); twarmup = wtime() - twarmup; /* Measures */ double tavg = 0.0; double tmin = 1E6; double tmax = 0.0; for (int i = 0; i < NREPS; i++) { double t = wtime(); sgemm_fun(a, b, c, N, M, Q); t = wtime() - t; tavg += t; tmin = (tmin > t) ? t : tmin; tmax = (tmax < t) ? 
t : tmax; } tavg /= NREPS; printf("%s (%d runs): perf %.2f GFLOPS; time: tavg %.6f, tmin %.6f, tmax %.6f, twarmup %.6f\n", msg, NREPS, gflop / tavg, tavg, tmin, tmax, twarmup); free(c); free(b); free(a); return tavg; } int main(int argc, char **argv) { printf("SGEMM N = %d, M = %d, Q = %d\n", N, M, Q); char buf[256]; sprintf(buf, "Phi OMP %s", getenv("MIC_OMP_NUM_THREADS")); run_phi(buf, &sgemm_phi); return 0; }
#include <stdio.h> #include <stdlib.h> #include <inttypes.h> #include <math.h> #include <sys/time.h> #include <omp.h> /* * C = [n, q] = A[n, m] * B[m, q] */ enum { N = 1000, M = 1000, Q = 1000, NREPS = 5, }; double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } /* Matrix multiplication C[n, q] = A[n, m] * B[m, q] */ void sgemm_phi(float *a, float *b, float *c, int n, int m, int q) { #pragma offload target(mic) in(a:length(n * m)) in(b:length(m * q)) out(c:length(n * q)) { #pragma omp parallel { int k = 0; #pragma omp for for (int i = 0; i < n; i++) for (int j = 0; j < q; j++) c[k++] = 0.0; #pragma omp for for (int i = 0; i < n; i++) { for (int k = 0; k < m; k++) { for (int j = 0; j < q; j++) c[i * q + j] += a[i * m + k] * b[k * q + j]; } } } } } double run_phi(const char *msg, void (*sgemm_fun) (float *, float *, float *, int, int, int)) { double gflop = 2.0 * N * Q * M * 1E-9; float *a, *b, *c; a = malloc(sizeof(*a) * N * M); b = malloc(sizeof(*b) * M * Q); c = malloc(sizeof(*c) * N * Q); if (a == NULL || b == NULL || c == NULL) { fprintf(stderr, "No enough memory\n"); exit(EXIT_FAILURE); } srand(0); for (int i = 0; i < N; i++) { for (int j = 0; j < M; j++) a[i * M + j] = rand() % 100; //1.0; } for (int i = 0; i < M; i++) { for (int j = 0; j < Q; j++) b[i * Q + j] = rand() % 100; //2.0; } /* Warmup */ double twarmup = wtime(); sgemm_fun(a, b, c, N, M, Q); twarmup = wtime() - twarmup; /* Measures */ double tavg = 0.0; double tmin = 1E6; double tmax = 0.0; for (int i = 0; i < NREPS; i++) { double t = wtime(); sgemm_fun(a, b, c, N, M, Q); t = wtime() - t; tavg += t; tmin = (tmin > t) ? t : tmin; tmax = (tmax < t) ? 
t : tmax; } tavg /= NREPS; printf("%s (%d runs): perf %.2f GFLOPS; time: tavg %.6f, tmin %.6f, tmax %.6f, twarmup %.6f\n", msg, NREPS, gflop / tavg, tavg, tmin, tmax, twarmup); free(c); free(b); free(a); return tavg; } int main(int argc, char **argv) { printf("SGEMM N = %d, M = %d, Q = %d\n", N, M, Q); char buf[256]; sprintf(buf, "Phi OMP %s", getenv("MIC_OMP_NUM_THREADS")); run_phi(buf, &sgemm_phi); return 0; }
GB_unop__lnot_int16_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_int16_int16) // op(A') function: GB (_unop_tran__lnot_int16_int16) // C type: int16_t // A type: int16_t // cast: int16_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_int16_int16) ( int16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = !(z != 0) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_int16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_int16_int16) // op(A') function: GB (_unop_tran__lnot_int16_int16) // C type: int16_t // A type: int16_t // cast: int16_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_int16_int16) ( int16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; 
int16_t z = aij ; Cx [p] = !(z != 0) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_int16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_int16_int16) // op(A') function: GB (_unop_tran__lnot_int16_int16) // C type: int16_t // A type: int16_t // cast: int16_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_int16_int16) ( int16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = !(z != 0) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_int16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; 
DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0)) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false)) .describe("Whether to turn off cudnn in dropout operator. " "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); 
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; if (sizeof(DType) > sizeof(int)) { // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr` Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s); maskptr = temp.dptr_; } BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1; outptr[i] = dataptr[i] * maskVal; mask.dptr_[i] = maskVal; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; const DType *maskptr = mask.dptr_; const int count = mask.shape_[0] * mask.shape_[1]; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i]; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); 
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && 
defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { if (req[dropout::kOut] == kWriteInplace) return; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { 
this->dropout_passthrough_ = true; const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); 
}); } } private: /*! \brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! \brief Dropout mode */ dropout::DropoutOpMode mode_; /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */ mxnet::TShape axes_; /*! \brief Flag to record whether forward is executed in pass-through mode */ bool dropout_passthrough_; #if MXNET_USE_CUDNN_DROPOUT bool cudnn_off_; Context ctx_; cudnnDataType_t dtype_; cudnnDropoutDescriptor_t dropout_desc_; size_t dropout_reserve_byte_; cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_; #endif // MXNET_USE_CUDNN_DROPOUT }; // class DropoutOp template<typename xpu> void DropoutCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Forward(ctx, inputs, req, outputs); }); } template<typename xpu> void DropoutGradCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1); CHECK_EQ(req.size(), 1); std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[dropout::kOut] = inputs[0]; out_data[dropout::kMask] = inputs[1]; MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Backward(ctx, out_grads, out_data, req, outputs); }); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
/*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0)) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false)) .describe("Whether to turn off cudnn in dropout operator. 
" "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; if (sizeof(DType) > sizeof(int)) { // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr` Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, 
int>(Shape1(count), s); maskptr = temp.dptr_; } BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; for (int i = 0; i < count; ++i) { const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1; outptr[i] = dataptr[i] * maskVal; mask.dptr_[i] = maskVal; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; const DType *maskptr = mask.dptr_; const int count = mask.shape_[0] * mask.shape_[1]; for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i]; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); 
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && 
defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { if (req[dropout::kOut] == kWriteInplace) return; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { 
this->dropout_passthrough_ = true; const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); 
}); } } private: /*! \brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! \brief Dropout mode */ dropout::DropoutOpMode mode_; /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */ mxnet::TShape axes_; /*! \brief Flag to record whether forward is executed in pass-through mode */ bool dropout_passthrough_; #if MXNET_USE_CUDNN_DROPOUT bool cudnn_off_; Context ctx_; cudnnDataType_t dtype_; cudnnDropoutDescriptor_t dropout_desc_; size_t dropout_reserve_byte_; cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_; #endif // MXNET_USE_CUDNN_DROPOUT }; // class DropoutOp template<typename xpu> void DropoutCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Forward(ctx, inputs, req, outputs); }); } template<typename xpu> void DropoutGradCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1); CHECK_EQ(req.size(), 1); std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[dropout::kOut] = inputs[0]; out_data[dropout::kMask] = inputs[1]; MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Backward(ctx, out_grads, out_data, req, outputs); }); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
/*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0)) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false)) .describe("Whether to turn off cudnn in dropout operator. 
" "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; if (sizeof(DType) > sizeof(int)) { // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr` Tensor<xpu, 1, int> temp = 
ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s); maskptr = temp.dptr_; } BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1; outptr[i] = dataptr[i] * maskVal; mask.dptr_[i] = maskVal; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; const DType *maskptr = mask.dptr_; const int count = mask.shape_[0] * mask.shape_[1]; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i]; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); 
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && 
defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { if (req[dropout::kOut] == kWriteInplace) return; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { 
this->dropout_passthrough_ = true; const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); 
}); } } private: /*! \brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! \brief Dropout mode */ dropout::DropoutOpMode mode_; /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */ mxnet::TShape axes_; /*! \brief Flag to record whether forward is executed in pass-through mode */ bool dropout_passthrough_; #if MXNET_USE_CUDNN_DROPOUT bool cudnn_off_; Context ctx_; cudnnDataType_t dtype_; cudnnDropoutDescriptor_t dropout_desc_; size_t dropout_reserve_byte_; cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_; #endif // MXNET_USE_CUDNN_DROPOUT }; // class DropoutOp template<typename xpu> void DropoutCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Forward(ctx, inputs, req, outputs); }); } template<typename xpu> void DropoutGradCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1); CHECK_EQ(req.size(), 1); std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[dropout::kOut] = inputs[0]; out_data[dropout::kMask] = inputs[1]; MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Backward(ctx, out_grads, out_data, req, outputs); }); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
server.c
// C99 // Start program: mpirun -np 1 server #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdbool.h> #include <unistd.h> // needed for sleep() on POSIX system #define MAX_DATA 100 int main( int argc, char **argv ) { int providedThreadSupport; bool terminateListening = false; char portName[MPI_MAX_PORT_NAME]; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreadSupport); if (MPI_THREAD_MULTIPLE != providedThreadSupport) { printf( "Requested MPI thread support is not guaranteed.\n"); } MPI_Open_port(MPI_INFO_NULL, portName); printf("Server available at port:%s\n", portName); #pragma omp parallel num_threads(2) shared(portName,terminateListening) { // Use OpemMP section construct for function parallelism #pragma omp sections { #pragma omp section { // Do some work sleep(15); // Connect to yourself in order to terminate listening terminateListening = true; MPI_Comm dummy; MPI_Comm_connect(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &dummy); printf("Server is connected to itself.\n"); MPI_Comm_disconnect(&dummy); printf("Server is disconnected.\n"); MPI_Close_port(portName); } #pragma omp section { // Listening section while (1) { MPI_Comm interClient = MPI_COMM_NULL; MPI_Comm_accept(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &interClient); if (terminateListening == true) { break; } MPI_Status status; char clientName[MAX_DATA]; MPI_Recv(clientName, MAX_DATA, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, interClient, &status); printf("Client is connected with name: %s\n", clientName); MPI_Comm_disconnect(&interClient); printf("Client is disconnected.\n"); } } } // End of sections } // End of parallel section MPI_Finalize(); return (0); }
// C99 // Start program: mpirun -np 1 server #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdbool.h> #include <unistd.h> // needed for sleep() on POSIX system #define MAX_DATA 100 int main( int argc, char **argv ) { int providedThreadSupport; bool terminateListening = false; char portName[MPI_MAX_PORT_NAME]; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreadSupport); if (MPI_THREAD_MULTIPLE != providedThreadSupport) { printf( "Requested MPI thread support is not guaranteed.\n"); } MPI_Open_port(MPI_INFO_NULL, portName); printf("Server available at port:%s\n", portName); // Use OpemMP section construct for function parallelism #pragma omp sections { #pragma omp section { // Do some work sleep(15); // Connect to yourself in order to terminate listening terminateListening = true; MPI_Comm dummy; MPI_Comm_connect(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &dummy); printf("Server is connected to itself.\n"); MPI_Comm_disconnect(&dummy); printf("Server is disconnected.\n"); MPI_Close_port(portName); } // Listening section while (1) { MPI_Comm interClient = MPI_COMM_NULL; MPI_Comm_accept(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &interClient); if (terminateListening == true) { break; } MPI_Status status; char clientName[MAX_DATA]; MPI_Recv(clientName, MAX_DATA, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, interClient, &status); printf("Client is connected with name: %s\n", clientName); MPI_Comm_disconnect(&interClient); printf("Client is disconnected.\n"); } } // End of sections // End of parallel section MPI_Finalize(); return (0); }
// C99 // Start program: mpirun -np 1 server #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdbool.h> #include <unistd.h> // needed for sleep() on POSIX system #define MAX_DATA 100 int main( int argc, char **argv ) { int providedThreadSupport; bool terminateListening = false; char portName[MPI_MAX_PORT_NAME]; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreadSupport); if (MPI_THREAD_MULTIPLE != providedThreadSupport) { printf( "Requested MPI thread support is not guaranteed.\n"); } MPI_Open_port(MPI_INFO_NULL, portName); printf("Server available at port:%s\n", portName); #pragma omp parallel num_threads(2) shared(portName,terminateListening) { // Use OpemMP section construct for function parallelism #pragma omp sections { #pragma omp section { // Do some work sleep(15); // Connect to yourself in order to terminate listening terminateListening = true; MPI_Comm dummy; MPI_Comm_connect(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &dummy); printf("Server is connected to itself.\n"); MPI_Comm_disconnect(&dummy); printf("Server is disconnected.\n"); MPI_Close_port(portName); } #pragma omp section { // Listening section while (1) { MPI_Comm interClient = MPI_COMM_NULL; MPI_Comm_accept(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &interClient); if (terminateListening == true) { break; } MPI_Status status; char clientName[MAX_DATA]; MPI_Recv(clientName, MAX_DATA, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, interClient, &status); printf("Client is connected with name: %s\n", clientName); MPI_Comm_disconnect(&interClient); printf("Client is disconnected.\n"); } } } // End of sections } // End of parallel section MPI_Finalize(); return (0); }